Vendor things

This commit is contained in:
John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions

View file

@ -0,0 +1,230 @@
use crate::{sys, Token};
use std::fmt;
/// A readiness event.
///
/// `Event` is a readiness state paired with a [`Token`]. It is returned by
/// [`Poll::poll`].
///
/// For more documentation on polling and events, see [`Poll`].
///
/// [`Poll::poll`]: ../struct.Poll.html#method.poll
/// [`Poll`]: ../struct.Poll.html
/// [`Token`]: ../struct.Token.html
#[derive(Clone)]
#[repr(transparent)]
pub struct Event {
    // Platform-specific event. `repr(transparent)` guarantees `Event` has the
    // exact same layout as `sys::Event`, which `from_sys_event_ref` relies on
    // to cast a `&sys::Event` into a `&Event`.
    inner: sys::Event,
}
impl Event {
    /// Returns the event's token.
    pub fn token(&self) -> Token {
        sys::event::token(&self.inner)
    }

    /// Returns true if the event contains readable readiness.
    ///
    /// # Notes
    ///
    /// Out-of-band (OOB) data also triggers readable events. But most
    /// applications don't actually read OOB data, so this could leave an
    /// application open to a Denial-of-Service (DoS) attack, see
    /// <https://github.com/sandstorm-io/sandstorm-website/blob/58f93346028c0576e8147627667328eaaf4be9fa/_posts/2015-04-08-osx-security-bug.md>.
    /// However because Mio uses edge-triggers it will not result in an infinite
    /// loop as described in the article above.
    pub fn is_readable(&self) -> bool {
        sys::event::is_readable(&self.inner)
    }

    /// Returns true if the event contains writable readiness.
    pub fn is_writable(&self) -> bool {
        sys::event::is_writable(&self.inner)
    }

    /// Returns true if the event contains error readiness.
    ///
    /// Error events occur when the socket enters an error state. In this case,
    /// the socket will also receive a readable or writable event. Reading or
    /// writing to the socket will result in an error.
    ///
    /// # Notes
    ///
    /// Method is available on all platforms, but not all platforms trigger the
    /// error event.
    ///
    /// The table below shows what flags are checked on what OS.
    ///
    /// | [OS selector] | Flag(s) checked |
    /// |---------------|-----------------|
    /// | [epoll] | `EPOLLERR` |
    /// | [kqueue] | `EV_ERROR` and `EV_EOF` with `fflags` set to `0`. |
    ///
    /// [OS selector]: ../struct.Poll.html#implementation-notes
    /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html
    /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
    pub fn is_error(&self) -> bool {
        sys::event::is_error(&self.inner)
    }

    /// Returns true if the event contains read closed readiness.
    ///
    /// # Notes
    ///
    /// Read closed readiness can be expected after any of the following have
    /// occurred:
    /// * The local stream has shutdown the read half of its socket
    /// * The local stream has shutdown both the read half and the write half
    ///   of its socket
    /// * The peer stream has shutdown the write half of its socket; this sends
    ///   a `FIN` packet that has been received by the local stream
    ///
    /// Method is a best effort implementation. While some platforms may not
    /// return readiness when read half is closed, it is guaranteed that
    /// false-positives will not occur.
    ///
    /// The table below shows what flags are checked on what OS.
    ///
    /// | [OS selector] | Flag(s) checked |
    /// |---------------|-----------------|
    /// | [epoll] | `EPOLLHUP`, or |
    /// | | `EPOLLIN` and `EPOLLRDHUP` |
    /// | [kqueue] | `EV_EOF` |
    ///
    /// [OS selector]: ../struct.Poll.html#implementation-notes
    /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html
    /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
    pub fn is_read_closed(&self) -> bool {
        sys::event::is_read_closed(&self.inner)
    }

    /// Returns true if the event contains write closed readiness.
    ///
    /// # Notes
    ///
    /// On [epoll] this is essentially a check for `EPOLLHUP` flag as the
    /// local stream shutting down its write half does not trigger this event.
    ///
    /// On [kqueue] the local stream shutting down the write half of its
    /// socket will trigger this event.
    ///
    /// Method is a best effort implementation. While some platforms may not
    /// return readiness when write half is closed, it is guaranteed that
    /// false-positives will not occur.
    ///
    /// The table below shows what flags are checked on what OS.
    ///
    /// | [OS selector] | Flag(s) checked |
    /// |---------------|-----------------|
    /// | [epoll] | `EPOLLHUP`, or |
    /// | | only `EPOLLERR`, or |
    /// | | `EPOLLOUT` and `EPOLLERR` |
    /// | [kqueue] | `EV_EOF` |
    ///
    /// [OS selector]: ../struct.Poll.html#implementation-notes
    /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html
    /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
    pub fn is_write_closed(&self) -> bool {
        sys::event::is_write_closed(&self.inner)
    }

    /// Returns true if the event contains priority readiness.
    ///
    /// # Notes
    ///
    /// Method is available on all platforms, but not all platforms trigger the
    /// priority event.
    ///
    /// The table below shows what flags are checked on what OS.
    ///
    /// | [OS selector] | Flag(s) checked |
    /// |---------------|-----------------|
    /// | [epoll] | `EPOLLPRI` |
    /// | [kqueue] | *Not supported* |
    ///
    /// [OS selector]: ../struct.Poll.html#implementation-notes
    /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html
    /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
    #[inline]
    pub fn is_priority(&self) -> bool {
        sys::event::is_priority(&self.inner)
    }

    /// Returns true if the event contains AIO readiness.
    ///
    /// # Notes
    ///
    /// Method is available on all platforms, but not all platforms support AIO.
    ///
    /// The table below shows what flags are checked on what OS.
    ///
    /// | [OS selector] | Flag(s) checked |
    /// |---------------|-----------------|
    /// | [epoll] | *Not supported* |
    /// | [kqueue]<sup>1</sup> | `EVFILT_AIO` |
    ///
    /// 1: Only supported on DragonFly BSD, FreeBSD, iOS and macOS.
    ///
    /// [OS selector]: ../struct.Poll.html#implementation-notes
    /// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html
    /// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
    pub fn is_aio(&self) -> bool {
        sys::event::is_aio(&self.inner)
    }

    /// Returns true if the event contains LIO readiness.
    ///
    /// # Notes
    ///
    /// Method is available on all platforms, but only FreeBSD supports LIO. On
    /// FreeBSD this method checks the `EVFILT_LIO` flag.
    pub fn is_lio(&self) -> bool {
        sys::event::is_lio(&self.inner)
    }

    /// Create a reference to an `Event` from a platform specific event.
    pub(crate) fn from_sys_event_ref(sys_event: &sys::Event) -> &Event {
        unsafe {
            // SAFETY: the memory layout of `Event` is the same as
            // `sys::Event` due to the `repr(transparent)` attribute.
            &*(sys_event as *const sys::Event as *const Event)
        }
    }
}
/// Lists the readiness of every kind of event this `Event` can carry.
///
/// When the [alternate] flag (`{:#?}`) is enabled this also prints platform
/// specific details, for example the fields of the `kevent` structure on
/// platforms that use `kqueue(2)`. Note however that the output of this
/// implementation is **not** considered a part of the stable API.
///
/// [alternate]: fmt::Formatter::alternate
impl fmt::Debug for Event {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Adapter that defers to the platform's detailed event formatter.
        struct EventDetails<'a>(&'a sys::Event);

        impl fmt::Debug for EventDetails<'_> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                sys::event::debug_details(f, self.0)
            }
        }

        // `debug_struct` borrows `f` mutably, so read the flag up front.
        let alternate = f.alternate();
        let mut d = f.debug_struct("Event");
        d.field("token", &self.token())
            .field("readable", &self.is_readable())
            .field("writable", &self.is_writable())
            .field("error", &self.is_error())
            .field("read_closed", &self.is_read_closed())
            .field("write_closed", &self.is_write_closed())
            .field("priority", &self.is_priority())
            .field("aio", &self.is_aio())
            .field("lio", &self.is_lio());
        if alternate {
            d.field("details", &EventDetails(&self.inner));
        }
        d.finish()
    }
}

View file

@ -0,0 +1,230 @@
use crate::event::Event;
use crate::sys;
use std::fmt;
/// A collection of readiness events.
///
/// `Events` is passed as an argument to [`Poll::poll`] and will be used to
/// receive any new readiness events received since the last poll. Usually, a
/// single `Events` instance is created at the same time as a [`Poll`] and
/// reused on each call to [`Poll::poll`].
///
/// See [`Poll`] for more documentation on polling.
///
/// [`Poll::poll`]: ../struct.Poll.html#method.poll
/// [`Poll`]: ../struct.Poll.html
///
/// # Examples
///
#[cfg_attr(feature = "os-poll", doc = "```")]
#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use mio::{Events, Poll};
/// use std::time::Duration;
///
/// let mut events = Events::with_capacity(1024);
/// let mut poll = Poll::new()?;
/// #
/// # assert!(events.is_empty());
///
/// // Register `event::Source`s with `poll`.
///
/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
///
/// for event in events.iter() {
/// println!("Got an event for {:?}", event.token());
/// }
/// # Ok(())
/// # }
/// ```
pub struct Events {
    // Platform-specific event buffer that the selector fills during a poll.
    inner: sys::Events,
}
/// [`Events`] iterator.
///
/// This struct is created by the [`iter`] method on [`Events`].
///
/// [`Events`]: struct.Events.html
/// [`iter`]: struct.Events.html#method.iter
///
/// # Examples
///
#[cfg_attr(feature = "os-poll", doc = "```")]
#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use mio::{Events, Poll};
/// use std::time::Duration;
///
/// let mut events = Events::with_capacity(1024);
/// let mut poll = Poll::new()?;
///
/// // Register handles with `poll`.
///
/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
///
/// for event in events.iter() {
/// println!("Got an event for {:?}", event.token());
/// }
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct Iter<'a> {
    // The collection being iterated over.
    inner: &'a Events,
    // Index of the next event to yield.
    pos: usize,
}
impl Events {
    /// Return a new `Events` capable of holding up to `capacity` events.
    ///
    /// # Examples
    ///
    /// ```
    /// use mio::Events;
    ///
    /// let events = Events::with_capacity(1024);
    /// assert_eq!(1024, events.capacity());
    /// ```
    pub fn with_capacity(capacity: usize) -> Events {
        Events {
            inner: sys::Events::with_capacity(capacity),
        }
    }

    /// Returns the number of `Event` values that `self` can hold.
    ///
    /// # Examples
    ///
    /// ```
    /// use mio::Events;
    ///
    /// let events = Events::with_capacity(1024);
    /// assert_eq!(1024, events.capacity());
    /// ```
    pub fn capacity(&self) -> usize {
        self.inner.capacity()
    }

    /// Returns `true` if `self` contains no `Event` values.
    ///
    /// # Examples
    ///
    /// ```
    /// use mio::Events;
    ///
    /// let events = Events::with_capacity(1024);
    /// assert!(events.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Returns an iterator over the `Event` values.
    ///
    /// # Examples
    ///
    #[cfg_attr(feature = "os-poll", doc = "```")]
    #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
    /// # use std::error::Error;
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::{Events, Poll};
    /// use std::time::Duration;
    ///
    /// let mut events = Events::with_capacity(1024);
    /// let mut poll = Poll::new()?;
    ///
    /// // Register handles with `poll`.
    ///
    /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
    ///
    /// for event in events.iter() {
    ///     println!("Got an event for {:?}", event.token());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn iter(&self) -> Iter<'_> {
        Iter {
            inner: self,
            pos: 0,
        }
    }

    /// Clears all `Event` values from the container explicitly.
    ///
    /// # Notes
    ///
    /// Events are cleared before every `poll`, so it is not required to call
    /// this manually.
    ///
    /// # Examples
    ///
    #[cfg_attr(feature = "os-poll", doc = "```")]
    #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
    /// # use std::error::Error;
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::{Events, Poll};
    /// use std::time::Duration;
    ///
    /// let mut events = Events::with_capacity(1024);
    /// let mut poll = Poll::new()?;
    ///
    /// // Register handles with `poll`.
    ///
    /// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
    ///
    /// // Clear all events.
    /// events.clear();
    /// assert!(events.is_empty());
    /// # Ok(())
    /// # }
    /// ```
    pub fn clear(&mut self) {
        self.inner.clear();
    }

    /// Returns a mutable reference to the inner `sys::Events`.
    pub(crate) fn sys(&mut self) -> &mut sys::Events {
        &mut self.inner
    }
}
impl<'a> IntoIterator for &'a Events {
    type Item = &'a Event;
    type IntoIter = Iter<'a>;

    // Allows `&Events` to be used directly in a `for` loop.
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
impl<'a> Iterator for Iter<'a> {
    type Item = &'a Event;

    fn next(&mut self) -> Option<Self::Item> {
        // `get` returns `None` once `pos` runs past the end of the buffer.
        let ret = self
            .inner
            .inner
            .get(self.pos)
            .map(Event::from_sys_event_ref);
        self.pos += 1;
        ret
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Report the number of *remaining* events, not the total length:
        // the `Iterator::size_hint` contract bounds the remaining elements.
        // `saturating_sub` because `next` keeps incrementing `pos` after the
        // iterator is exhausted.
        let remaining = self.inner.inner.len().saturating_sub(self.pos);
        (remaining, Some(remaining))
    }

    fn count(self) -> usize {
        // Likewise, only the events not yet yielded remain to be counted.
        self.inner.inner.len().saturating_sub(self.pos)
    }
}
impl fmt::Debug for Events {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Format as a list of `Event`s, reusing the iterator implementation.
        f.debug_list().entries(self).finish()
    }
}

10
third-party/vendor/mio/src/event/mod.rs vendored Normal file
View file

@ -0,0 +1,10 @@
//! Readiness event types and utilities.
#[allow(clippy::module_inception)]
mod event;
mod events;
mod source;
pub use self::event::Event;
pub use self::events::{Events, Iter};
pub use self::source::Source;

View file

@ -0,0 +1,139 @@
use crate::{Interest, Registry, Token};
use std::io;
/// An event source that may be registered with [`Registry`].
///
/// Types that implement `event::Source` can be registered with
/// `Registry`. Users of Mio **should not** use the `event::Source` trait
/// functions directly. Instead, the equivalent functions on `Registry` should
/// be used.
///
/// See [`Registry`] for more details.
///
/// [`Registry`]: ../struct.Registry.html
///
/// # Implementing `event::Source`
///
/// Event sources are always backed by system handles, such as sockets or other
/// system handles. These `event::Source`s will be monitored by the system
/// selector. An implementation of `Source` will almost always delegate to a
/// lower level handle. Examples of this are [`TcpStream`]s, or the *unix only*
/// [`SourceFd`].
///
/// [`TcpStream`]: ../net/struct.TcpStream.html
/// [`SourceFd`]: ../unix/struct.SourceFd.html
///
/// # Dropping `event::Source`s
///
/// All `event::Source`s, unless otherwise specified, need to be [deregistered]
/// before being dropped for them to not leak resources. This goes against the
/// normal drop behaviour of types in Rust which cleanup after themselves, e.g.
/// a `File` will close itself. However since deregistering needs access to
/// [`Registry`] this cannot be done while being dropped.
///
/// [deregistered]: ../struct.Registry.html#method.deregister
///
/// # Examples
///
/// Implementing `Source` on a struct containing a socket:
///
#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
/// use mio::{Interest, Registry, Token};
/// use mio::event::Source;
/// use mio::net::TcpStream;
///
/// use std::io;
///
/// # #[allow(dead_code)]
/// pub struct MySource {
/// socket: TcpStream,
/// }
///
/// impl Source for MySource {
/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest)
/// -> io::Result<()>
/// {
/// // Delegate the `register` call to `socket`
/// self.socket.register(registry, token, interests)
/// }
///
/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest)
/// -> io::Result<()>
/// {
/// // Delegate the `reregister` call to `socket`
/// self.socket.reregister(registry, token, interests)
/// }
///
/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
/// // Delegate the `deregister` call to `socket`
/// self.socket.deregister(registry)
/// }
/// }
/// ```
pub trait Source {
    /// Register `self` with the given `Registry` instance.
    ///
    /// This function should not be called directly. Use [`Registry::register`]
    /// instead. Implementors should handle registration by delegating the call
    /// to another `Source` type.
    ///
    /// [`Registry::register`]: ../struct.Registry.html#method.register
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()>;

    /// Re-register `self` with the given `Registry` instance.
    ///
    /// This function should not be called directly. Use
    /// [`Registry::reregister`] instead. Implementors should handle
    /// re-registration by delegating the call to another `Source` type.
    ///
    /// [`Registry::reregister`]: ../struct.Registry.html#method.reregister
    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()>;

    /// Deregister `self` from the given `Registry` instance.
    ///
    /// This function should not be called directly. Use
    /// [`Registry::deregister`] instead. Implementors should handle
    /// deregistration by delegating the call to another `Source` type.
    ///
    /// [`Registry::deregister`]: ../struct.Registry.html#method.deregister
    fn deregister(&mut self, registry: &Registry) -> io::Result<()>;
}
impl<T> Source for Box<T>
where
    T: Source + ?Sized,
{
    // Forward every call to the boxed source, so boxed (including trait
    // object) sources can be registered directly.
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        (**self).register(registry, token, interests)
    }

    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        (**self).reregister(registry, token, interests)
    }

    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        (**self).deregister(registry)
    }
}

200
third-party/vendor/mio/src/interest.rs vendored Normal file
View file

@ -0,0 +1,200 @@
use std::num::NonZeroU8;
use std::{fmt, ops};
/// Interest used in registering.
///
/// Interest are used in [registering] [`event::Source`]s with [`Poll`], they
/// indicate what readiness should be monitored for. For example if a socket is
/// registered with [readable] interests and the socket becomes writable, no
/// event will be returned from a call to [`poll`].
///
/// [registering]: struct.Registry.html#method.register
/// [`event::Source`]: ./event/trait.Source.html
/// [`Poll`]: struct.Poll.html
/// [readable]: struct.Interest.html#associatedconstant.READABLE
/// [`poll`]: struct.Poll.html#method.poll
#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub struct Interest(NonZeroU8);

// Backing bit flags; every flag must occupy a unique bit.
const READABLE: u8 = 0b0001;
const WRITABLE: u8 = 0b0010;
// The flags below are not available on all platforms.
const AIO: u8 = 0b0100;
const LIO: u8 = 0b1000;
const PRIORITY: u8 = 0b10000;

impl Interest {
    /// Returns a `Interest` set representing readable interests.
    pub const READABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(READABLE) });

    /// Returns a `Interest` set representing writable interests.
    pub const WRITABLE: Interest = Interest(unsafe { NonZeroU8::new_unchecked(WRITABLE) });

    /// Returns a `Interest` set representing AIO completion interests.
    #[cfg(any(
        target_os = "dragonfly",
        target_os = "freebsd",
        target_os = "ios",
        target_os = "macos",
        target_os = "tvos",
        target_os = "watchos",
    ))]
    pub const AIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(AIO) });

    /// Returns a `Interest` set representing LIO completion interests.
    #[cfg(target_os = "freebsd")]
    pub const LIO: Interest = Interest(unsafe { NonZeroU8::new_unchecked(LIO) });

    /// Returns a `Interest` set representing priority completion interests.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    pub const PRIORITY: Interest = Interest(unsafe { NonZeroU8::new_unchecked(PRIORITY) });

    /// Combines two `Interest` sets into one.
    ///
    /// This does the same thing as the `BitOr` implementation, but is a
    /// constant function.
    ///
    /// ```
    /// use mio::Interest;
    ///
    /// const INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE);
    /// # fn silent_dead_code_warning(_: Interest) { }
    /// # silent_dead_code_warning(INTERESTS)
    /// ```
    #[allow(clippy::should_implement_trait)]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn add(self, other: Interest) -> Interest {
        let bits = self.0.get() | other.0.get();
        // SAFETY: both operands are non-zero, so their union cannot be zero.
        Interest(unsafe { NonZeroU8::new_unchecked(bits) })
    }

    /// Removes `other` `Interest` from `self`.
    ///
    /// Returns `None` if the set would be empty after removing `other`.
    ///
    /// ```
    /// use mio::Interest;
    ///
    /// const RW_INTERESTS: Interest = Interest::READABLE.add(Interest::WRITABLE);
    ///
    /// // As long as one interest remains this will return `Some`.
    /// let w_interest = RW_INTERESTS.remove(Interest::READABLE).unwrap();
    /// assert!(!w_interest.is_readable());
    /// assert!(w_interest.is_writable());
    ///
    /// // Removing all interests from the set will return `None`.
    /// assert_eq!(w_interest.remove(Interest::WRITABLE), None);
    ///
    /// // It's also possible to remove multiple interests at once.
    /// assert_eq!(RW_INTERESTS.remove(RW_INTERESTS), None);
    /// ```
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub fn remove(self, other: Interest) -> Option<Interest> {
        let bits = self.0.get() & !other.0.get();
        // `NonZeroU8::new` yields `None` exactly when no interest is left.
        NonZeroU8::new(bits).map(Interest)
    }

    /// Returns whether `flag` is part of this interest set.
    const fn contains_flag(self, flag: u8) -> bool {
        self.0.get() & flag != 0
    }

    /// Returns true if the value includes readable readiness.
    #[must_use]
    pub const fn is_readable(self) -> bool {
        self.contains_flag(READABLE)
    }

    /// Returns true if the value includes writable readiness.
    #[must_use]
    pub const fn is_writable(self) -> bool {
        self.contains_flag(WRITABLE)
    }

    /// Returns true if `Interest` contains AIO readiness.
    #[must_use]
    pub const fn is_aio(self) -> bool {
        self.contains_flag(AIO)
    }

    /// Returns true if `Interest` contains LIO readiness.
    #[must_use]
    pub const fn is_lio(self) -> bool {
        self.contains_flag(LIO)
    }

    /// Returns true if `Interest` contains priority readiness.
    #[must_use]
    pub const fn is_priority(self) -> bool {
        self.contains_flag(PRIORITY)
    }
}
impl ops::BitOr for Interest {
    type Output = Self;

    // Union of two interest sets; see `Interest::add`.
    #[inline]
    fn bitor(self, other: Self) -> Self {
        Self::add(self, other)
    }
}

impl ops::BitOrAssign for Interest {
    // In-place union.
    #[inline]
    fn bitor_assign(&mut self, other: Self) {
        *self = self.add(other);
    }
}
impl fmt::Debug for Interest {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Collect the names of all set flags, then join them with " | ".
        let mut parts: Vec<&str> = Vec::new();
        if self.is_readable() {
            parts.push("READABLE");
        }
        if self.is_writable() {
            parts.push("WRITABLE");
        }
        #[cfg(any(
            target_os = "dragonfly",
            target_os = "freebsd",
            target_os = "ios",
            target_os = "macos",
            target_os = "tvos",
            target_os = "watchos",
        ))]
        {
            if self.is_aio() {
                parts.push("AIO");
            }
        }
        #[cfg(target_os = "freebsd")]
        {
            if self.is_lio() {
                parts.push("LIO");
            }
        }
        #[cfg(any(target_os = "linux", target_os = "android"))]
        {
            if self.is_priority() {
                parts.push("PRIORITY");
            }
        }
        // An `Interest` is backed by a `NonZeroU8`, so at least one flag must
        // be set.
        debug_assert!(!parts.is_empty(), "printing empty interests");
        fmt.write_str(&parts.join(" | "))
    }
}

332
third-party/vendor/mio/src/io_source.rs vendored Normal file
View file

@ -0,0 +1,332 @@
use std::ops::{Deref, DerefMut};
#[cfg(unix)]
use std::os::unix::io::AsRawFd;
#[cfg(target_os = "wasi")]
use std::os::wasi::io::AsRawFd;
#[cfg(windows)]
use std::os::windows::io::AsRawSocket;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{fmt, io};
use crate::sys::IoSourceState;
use crate::{event, Interest, Registry, Token};
/// Adapter for a [`RawFd`] or [`RawSocket`] providing an [`event::Source`]
/// implementation.
///
/// `IoSource` enables registering any FD or socket wrapper with [`Poll`].
///
/// While only implementations for TCP, UDP, and UDS (Unix only) are provided,
/// Mio supports registering any FD or socket that can be registered with the
/// underlying OS selector. `IoSource` provides the necessary bridge.
///
/// [`RawFd`]: std::os::unix::io::RawFd
/// [`RawSocket`]: std::os::windows::io::RawSocket
///
/// # Notes
///
/// To handle the registrations and events properly **all** I/O operations (such
/// as `read`, `write`, etc.) must go through the [`do_io`] method to ensure the
/// internal state is updated accordingly.
///
/// [`Poll`]: crate::Poll
/// [`do_io`]: IoSource::do_io
/*
///
/// # Examples
///
/// Basic usage.
///
/// ```
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use mio::{Interest, Poll, Token};
/// use mio::IoSource;
///
/// use std::net;
///
/// let poll = Poll::new()?;
///
/// // Bind a std TCP listener.
/// let listener = net::TcpListener::bind("127.0.0.1:0")?;
/// // Wrap it in the `IoSource` type.
/// let mut listener = IoSource::new(listener);
///
/// // Register the listener.
/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?;
/// # Ok(())
/// # }
/// ```
*/
pub struct IoSource<T> {
    // Platform-specific registration state.
    state: IoSourceState,
    // The wrapped I/O source.
    inner: T,
    // Debug-only check that the source is registered with exactly one
    // `Registry` at a time.
    #[cfg(debug_assertions)]
    selector_id: SelectorId,
}
impl<T> IoSource<T> {
    /// Create a new `IoSource`.
    pub fn new(io: T) -> IoSource<T> {
        IoSource {
            state: IoSourceState::new(),
            inner: io,
            #[cfg(debug_assertions)]
            selector_id: SelectorId::new(),
        }
    }

    /// Execute an I/O operation, ensuring that the socket receives more events
    /// if it hits a [`WouldBlock`] error.
    ///
    /// # Notes
    ///
    /// This method is required to be called for **all** I/O operations to
    /// ensure the user will receive events once the socket is ready again after
    /// returning a [`WouldBlock`] error.
    ///
    /// [`WouldBlock`]: io::ErrorKind::WouldBlock
    pub fn do_io<F, R>(&self, f: F) -> io::Result<R>
    where
        F: FnOnce(&T) -> io::Result<R>,
    {
        self.state.do_io(f, &self.inner)
    }

    /// Returns the I/O source, dropping the state.
    ///
    /// # Notes
    ///
    /// To ensure no more events are to be received for this I/O source first
    /// [`deregister`] it.
    ///
    /// [`deregister`]: Registry::deregister
    pub fn into_inner(self) -> T {
        self.inner
    }
}
/// Be careful when using this method. All I/O operations that may block must go
/// through the [`do_io`] method.
///
/// [`do_io`]: IoSource::do_io
impl<T> Deref for IoSource<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
/// Be careful when using this method. All I/O operations that may block must go
/// through the [`do_io`] method.
///
/// [`do_io`]: IoSource::do_io
impl<T> DerefMut for IoSource<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
#[cfg(unix)]
impl<T> event::Source for IoSource<T>
where
    T: AsRawFd,
{
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        // Debug-only bookkeeping: catches double registration.
        #[cfg(debug_assertions)]
        self.selector_id.associate(registry)?;
        self.state
            .register(registry, token, interests, self.inner.as_raw_fd())
    }

    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        // Debug-only bookkeeping: must already be registered with `registry`.
        #[cfg(debug_assertions)]
        self.selector_id.check_association(registry)?;
        self.state
            .reregister(registry, token, interests, self.inner.as_raw_fd())
    }

    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        #[cfg(debug_assertions)]
        self.selector_id.remove_association(registry)?;
        self.state.deregister(registry, self.inner.as_raw_fd())
    }
}
#[cfg(windows)]
impl<T> event::Source for IoSource<T>
where
    T: AsRawSocket,
{
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        // Debug-only bookkeeping: catches double registration.
        #[cfg(debug_assertions)]
        self.selector_id.associate(registry)?;
        self.state
            .register(registry, token, interests, self.inner.as_raw_socket())
    }

    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        #[cfg(debug_assertions)]
        self.selector_id.check_association(registry)?;
        // NOTE(review): unlike `register`, no raw socket (nor registry) is
        // passed here — presumably the Windows `IoSourceState` retains them
        // from registration; confirm against the sys implementation.
        self.state.reregister(registry, token, interests)
    }

    fn deregister(&mut self, _registry: &Registry) -> io::Result<()> {
        #[cfg(debug_assertions)]
        self.selector_id.remove_association(_registry)?;
        // The registry is only needed for the debug-assertion check above.
        self.state.deregister()
    }
}
#[cfg(target_os = "wasi")]
impl<T> event::Source for IoSource<T>
where
    T: AsRawFd,
{
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        // Debug-only bookkeeping: catches double registration.
        #[cfg(debug_assertions)]
        self.selector_id.associate(registry)?;
        // On WASI the fd is registered directly with the selector; `state` is
        // not involved.
        registry
            .selector()
            .register(self.inner.as_raw_fd() as _, token, interests)
    }

    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        #[cfg(debug_assertions)]
        self.selector_id.check_association(registry)?;
        registry
            .selector()
            .reregister(self.inner.as_raw_fd() as _, token, interests)
    }

    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        #[cfg(debug_assertions)]
        self.selector_id.remove_association(registry)?;
        registry.selector().deregister(self.inner.as_raw_fd() as _)
    }
}
impl<T> fmt::Debug for IoSource<T>
where
    T: fmt::Debug,
{
    // Delegates to the wrapped source's `Debug`, hiding the adapter itself.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}
/// Used to associate an `IoSource` with a `sys::Selector`.
#[cfg(debug_assertions)]
#[derive(Debug)]
struct SelectorId {
    // Id of the associated selector, or `UNASSOCIATED` when not registered.
    id: AtomicUsize,
}
#[cfg(debug_assertions)]
impl SelectorId {
    /// Value of `id` if `SelectorId` is not associated with any
    /// `sys::Selector`. Valid selector ids start at 1.
    const UNASSOCIATED: usize = 0;

    /// Create a new, unassociated `SelectorId`.
    const fn new() -> SelectorId {
        SelectorId {
            id: AtomicUsize::new(Self::UNASSOCIATED),
        }
    }

    /// Associate an I/O source with `registry`, returning an error if it's
    /// already registered.
    fn associate(&self, registry: &Registry) -> io::Result<()> {
        let registry_id = registry.selector().id();
        // NOTE(review): the swap stores the new id even when the source was
        // already associated, so on the error path below the previous
        // association has already been overwritten. This is debug-only
        // diagnostic state — confirm that is intended.
        let previous_id = self.id.swap(registry_id, Ordering::AcqRel);
        if previous_id == Self::UNASSOCIATED {
            Ok(())
        } else {
            Err(io::Error::new(
                io::ErrorKind::AlreadyExists,
                "I/O source already registered with a `Registry`",
            ))
        }
    }

    /// Check the association of an I/O source with `registry`, returning an
    /// error if it's registered with a different `Registry` or not registered
    /// at all.
    fn check_association(&self, registry: &Registry) -> io::Result<()> {
        let registry_id = registry.selector().id();
        let id = self.id.load(Ordering::Acquire);
        if id == registry_id {
            Ok(())
        } else if id == Self::UNASSOCIATED {
            Err(io::Error::new(
                io::ErrorKind::NotFound,
                "I/O source not registered with `Registry`",
            ))
        } else {
            Err(io::Error::new(
                io::ErrorKind::AlreadyExists,
                "I/O source already registered with a different `Registry`",
            ))
        }
    }

    /// Remove a previously made association from `registry`, returns an error
    /// if it was not previously associated with `registry`.
    fn remove_association(&self, registry: &Registry) -> io::Result<()> {
        let registry_id = registry.selector().id();
        // The id is reset to `UNASSOCIATED` unconditionally; the error below
        // only reports the mismatch.
        let previous_id = self.id.swap(Self::UNASSOCIATED, Ordering::AcqRel);
        if previous_id == registry_id {
            Ok(())
        } else {
            Err(io::Error::new(
                io::ErrorKind::NotFound,
                "I/O source not registered with `Registry`",
            ))
        }
    }
}
#[cfg(debug_assertions)]
impl Clone for SelectorId {
    // Manual implementation because `AtomicUsize` is not `Clone`; the clone
    // starts from a snapshot of the current association.
    fn clone(&self) -> SelectorId {
        SelectorId {
            id: AtomicUsize::new(self.id.load(Ordering::Acquire)),
        }
    }
}

266
third-party/vendor/mio/src/lib.rs vendored Normal file
View file

@ -0,0 +1,266 @@
#![deny(
missing_docs,
missing_debug_implementations,
rust_2018_idioms,
unused_imports,
dead_code
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
// Disallow warnings when running tests.
#![cfg_attr(test, deny(warnings))]
// Disallow warnings in examples.
#![doc(test(attr(deny(warnings))))]
//! Mio is a fast, low-level I/O library for Rust focusing on non-blocking APIs
//! and event notification for building high performance I/O apps with as little
//! overhead as possible over the OS abstractions.
//!
//! # Usage
//!
//! Using Mio starts by creating a [`Poll`], which reads events from the OS and
//! puts them into [`Events`]. You can handle I/O events from the OS with it.
//!
//! For more detail, see [`Poll`].
//!
//! [`Poll`]: ../mio/struct.Poll.html
//! [`Events`]: ../mio/event/struct.Events.html
//!
//! ## Examples
//!
//! Examples can be found in the `examples` directory of the source code, or [on
//! GitHub].
//!
//! [on GitHub]: https://github.com/tokio-rs/mio/tree/master/examples
//!
//! ## Guide
//!
//! A getting started guide is available in the [`guide`] module.
//!
//! ## Available features
//!
//! The available features are described in the [`features`] module.
// macros used internally
#[macro_use]
mod macros;
mod interest;
mod poll;
mod sys;
mod token;
#[cfg(not(target_os = "wasi"))]
mod waker;
pub mod event;
cfg_io_source! {
mod io_source;
}
cfg_net! {
pub mod net;
}
#[doc(no_inline)]
pub use event::Events;
pub use interest::Interest;
pub use poll::{Poll, Registry};
pub use token::Token;
#[cfg(not(target_os = "wasi"))]
pub use waker::Waker;
#[cfg(all(unix, feature = "os-ext"))]
#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "os-ext"))))]
pub mod unix {
    //! Unix only extensions: [`SourceFd`] and the [`pipe`] module.
    pub mod pipe {
        //! Unix pipe.
        //!
        //! See the [`new`] function for documentation.
        pub use crate::sys::pipe::{new, Receiver, Sender};
    }
    pub use crate::sys::SourceFd;
}
#[cfg(all(windows, feature = "os-ext"))]
#[cfg_attr(docsrs, doc(cfg(all(windows, feature = "os-ext"))))]
pub mod windows {
    //! Windows only extensions, currently just [`NamedPipe`].
    pub use crate::sys::named_pipe::NamedPipe;
}
pub mod features {
    //! # Mio's optional features.
    //!
    //! This document describes the available features in Mio.
    //!
    // The headings below are generated at doc-build time so each one reflects
    // whether the feature was active for this particular build.
    #![cfg_attr(feature = "os-poll", doc = "## `os-poll` (enabled)")]
    #![cfg_attr(not(feature = "os-poll"), doc = "## `os-poll` (disabled)")]
    //!
    //! Mio by default provides only a shell implementation that `panic!`s the
    //! moment it is actually run. To run it requires OS support, this is
    //! enabled by activating the `os-poll` feature.
    //!
    //! This makes `Poll`, `Registry` and `Waker` functional.
    //!
    #![cfg_attr(feature = "os-ext", doc = "## `os-ext` (enabled)")]
    #![cfg_attr(not(feature = "os-ext"), doc = "## `os-ext` (disabled)")]
    //!
    //! `os-ext` enables additional OS specific facilities. These facilities can
    //! be found in the `unix` and `windows` modules.
    //!
    #![cfg_attr(feature = "net", doc = "## Network types (enabled)")]
    #![cfg_attr(not(feature = "net"), doc = "## Network types (disabled)")]
    //!
    //! The `net` feature enables networking primitives in the `net` module.
}
pub mod guide {
//! # Getting started guide.
//!
//! In this guide we'll do the following:
//!
//! 1. Create a [`Poll`] instance (and learn what it is).
//! 2. Register an [event source].
//! 3. Create an event loop.
//!
//! At the end you'll have a very small (but quick) TCP server that accepts
//! connections and then drops (disconnects) them.
//!
//! ## 1. Creating a `Poll` instance
//!
//! Using Mio starts by creating a [`Poll`] instance, which monitors events
//! from the OS and puts them into [`Events`]. This allows us to execute I/O
//! operations based on what operations are ready.
//!
//! [`Poll`]: ../struct.Poll.html
//! [`Events`]: ../event/struct.Events.html
//!
#![cfg_attr(feature = "os-poll", doc = "```")]
#![cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
//! # use mio::{Poll, Events};
//! # fn main() -> std::io::Result<()> {
//! // `Poll` allows for polling of readiness events.
//! let poll = Poll::new()?;
//! // `Events` is collection of readiness `Event`s and can be filled by
//! // calling `Poll::poll`.
//! let events = Events::with_capacity(128);
//! # drop((poll, events));
//! # Ok(())
//! # }
//! ```
//!
//! For example if we're using a [`TcpListener`], we'll only want to
//! attempt to accept an incoming connection *iff* any connections are
//! queued and ready to be accepted. We don't want to waste our time if no
//! connections are ready.
//!
//! [`TcpListener`]: ../net/struct.TcpListener.html
//!
//! ## 2. Registering event source
//!
//! After we've created a [`Poll`] instance that monitors events from the OS
//! for us, we need to provide it with a source of events. This is done by
//! registering an [event source]. As the name “event source” suggests it is
//! a source of events which can be polled using a `Poll` instance. On Unix
//! systems this is usually a file descriptor, or a socket/handle on
//! Windows.
//!
//! In the example below we'll use a [`TcpListener`] for which we'll receive
//! an event (from [`Poll`]) once a connection is ready to be accepted.
//!
//! [event source]: ../event/trait.Source.html
//!
#![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
#![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
//! # use mio::net::TcpListener;
//! # use mio::{Poll, Token, Interest};
//! # fn main() -> std::io::Result<()> {
//! # let poll = Poll::new()?;
//! # let address = "127.0.0.1:0".parse().unwrap();
//! // Create a `TcpListener`, binding it to `address`.
//! let mut listener = TcpListener::bind(address)?;
//!
//! // Next we register it with `Poll` to receive events for it. The `SERVER`
//! // `Token` is used to determine that we received an event for the listener
//! // later on.
//! const SERVER: Token = Token(0);
//! poll.registry().register(&mut listener, SERVER, Interest::READABLE)?;
//! # Ok(())
//! # }
//! ```
//!
//! Multiple event sources can be [registered] (concurrently), so we can
//! monitor multiple sources at a time.
//!
//! [registered]: ../struct.Registry.html#method.register
//!
//! ## 3. Creating the event loop
//!
//! After we've created a [`Poll`] instance and registered one or more
//! [event sources] with it, we can [poll] it for events. Polling for events
//! is simple, we need a container to store the events: [`Events`] and need
//! to do something based on the polled events (this part is up to you, we
//! can't do it all!). If we do this in a loop we've got ourselves an event
//! loop.
//!
//! The example below shows the event loop in action, completing our small
//! TCP server.
//!
//! [poll]: ../struct.Poll.html#method.poll
//! [event sources]: ../event/trait.Source.html
//!
#![cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
#![cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
//! # use std::io;
//! # use std::time::Duration;
//! # use mio::net::TcpListener;
//! # use mio::{Poll, Token, Interest, Events};
//! # fn main() -> io::Result<()> {
//! # let mut poll = Poll::new()?;
//! # let mut events = Events::with_capacity(128);
//! # let address = "127.0.0.1:0".parse().unwrap();
//! # let mut listener = TcpListener::bind(address)?;
//! # const SERVER: Token = Token(0);
//! # poll.registry().register(&mut listener, SERVER, Interest::READABLE)?;
//! // Start our event loop.
//! loop {
//! // Poll the OS for events, waiting at most 100 milliseconds.
//! poll.poll(&mut events, Some(Duration::from_millis(100)))?;
//!
//! // Process each event.
//! for event in events.iter() {
//! // We can use the token we previously provided to `register` to
//! // determine for which type the event is.
//! match event.token() {
//! SERVER => loop {
//! // One or more connections are ready, so we'll attempt to
//! // accept them (in a loop).
//! match listener.accept() {
//! Ok((connection, address)) => {
//! println!("Got a connection from: {}", address);
//! # drop(connection);
//! },
//! // A "would block error" is returned if the operation
//! // is not ready, so we'll stop trying to accept
//! // connections.
//! Err(ref err) if would_block(err) => break,
//! Err(err) => return Err(err),
//! }
//! }
//! # _ => unreachable!(),
//! }
//! }
//! # return Ok(());
//! }
//!
//! fn would_block(err: &io::Error) -> bool {
//! err.kind() == io::ErrorKind::WouldBlock
//! }
//! # }
//! ```
}

98
third-party/vendor/mio/src/macros.rs vendored Normal file
View file

@ -0,0 +1,98 @@
//! Macros to ease conditional code based on enabled features.
// Depending on the features not all macros are used.
#![allow(unused_macros)]
/// The `os-poll` feature is enabled.
// Each wrapped item is gated individually, so one invocation can cover any
// number of items. The `docsrs` attribute surfaces the feature requirement in
// rendered documentation on docs.rs.
macro_rules! cfg_os_poll {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "os-poll")]
            #[cfg_attr(docsrs, doc(cfg(feature = "os-poll")))]
            $item
        )*
    }
}
/// The `os-poll` feature is disabled.
// NOTE(review): no `doc(cfg)` annotation here — presumably because these items
// are fallback shims rather than feature-gated public API; confirm against the
// docs.rs rendering.
macro_rules! cfg_not_os_poll {
    ($($item:item)*) => {
        $(
            #[cfg(not(feature = "os-poll"))]
            $item
        )*
    }
}
/// The `os-ext` feature is enabled.
macro_rules! cfg_os_ext {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "os-ext")]
            #[cfg_attr(docsrs, doc(cfg(feature = "os-ext")))]
            $item
        )*
    }
}
/// The `net` feature is enabled.
macro_rules! cfg_net {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "net")]
            #[cfg_attr(docsrs, doc(cfg(feature = "net")))]
            $item
        )*
    }
}
/// One of the features enabled that needs `IoSource`. That is `net` or `os-ext`
/// on Unix (for `pipe`).
macro_rules! cfg_io_source {
    ($($item:item)*) => {
        $(
            #[cfg(any(feature = "net", all(unix, feature = "os-ext")))]
            #[cfg_attr(docsrs, doc(cfg(any(feature = "net", all(unix, feature = "os-ext")))))]
            $item
        )*
    }
}
/// The `os-ext` feature is enabled, or one of the features that need `os-ext`.
macro_rules! cfg_any_os_ext {
    ($($item:item)*) => {
        $(
            #[cfg(any(feature = "os-ext", feature = "net"))]
            #[cfg_attr(docsrs, doc(cfg(any(feature = "os-ext", feature = "net"))))]
            $item
        )*
    }
}
/// Log at `trace` level via the `log` crate, when the `log` feature is enabled.
macro_rules! trace {
    ($($t:tt)*) => {
        log!(trace, $($t)*)
    }
}
/// Log at `warn` level via the `log` crate, when the `log` feature is enabled.
macro_rules! warn {
    ($($t:tt)*) => {
        log!(warn, $($t)*)
    }
}
/// Log at `error` level via the `log` crate, when the `log` feature is enabled.
macro_rules! error {
    ($($t:tt)*) => {
        log!(error, $($t)*)
    }
}
/// Dispatch to `log::$level!` when the `log` feature is enabled; otherwise
/// expand to dead code so the arguments still name their variables.
macro_rules! log {
    ($level: ident, $($t:tt)*) => {
        #[cfg(feature = "log")]
        { log::$level!($($t)*) }
        // Silence unused variables warnings.
        #[cfg(not(feature = "log"))]
        { if false { let _ = ( $($t)* ); } }
    }
}

39
third-party/vendor/mio/src/net/mod.rs vendored Normal file
View file

@ -0,0 +1,39 @@
//! Networking primitives.
//!
//! The types provided in this module are non-blocking by default and are
//! designed to be portable across all supported Mio platforms. As long as the
//! [portability guidelines] are followed, the behavior should be identical no
//! matter the target platform.
//!
//! [portability guidelines]: ../struct.Poll.html#portability
//!
//! # Notes
//!
//! When using a datagram based socket, i.e. [`UdpSocket`] or [`UnixDatagram`],
//! it's only possible to receive a packet once. This means that if you provide a
//! buffer that is too small you won't be able to receive the data anymore. How
//! OSs deal with this situation is different for each OS:
//! * Unixes, such as Linux, FreeBSD and macOS, will simply fill the buffer and
//! return the amount of bytes written. This means that if the returned value
//! is equal to the size of the buffer it may have only written a part of the
//! packet (or the packet has the same size as the buffer).
//! * Windows returns an `WSAEMSGSIZE` error.
//!
//! Mio does not change the value (either ok or error) returned by the OS, it's
//! up to the user to handle this. How to deal with these differences is still up
//! for debate, specifically in
//! <https://github.com/rust-lang/rust/issues/55794>. The best advice we can
//! give is to always call receive with a large enough buffer.
mod tcp;
pub use self::tcp::{TcpListener, TcpStream};
#[cfg(not(target_os = "wasi"))]
mod udp;
#[cfg(not(target_os = "wasi"))]
pub use self::udp::UdpSocket;
#[cfg(unix)]
mod uds;
#[cfg(unix)]
pub use self::uds::{SocketAddr, UnixDatagram, UnixListener, UnixStream};

View file

@ -0,0 +1,248 @@
use std::net::{self, SocketAddr};
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(target_os = "wasi")]
use std::os::wasi::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
use std::{fmt, io};
use crate::io_source::IoSource;
use crate::net::TcpStream;
#[cfg(unix)]
use crate::sys::tcp::set_reuseaddr;
#[cfg(not(target_os = "wasi"))]
use crate::sys::tcp::{bind, listen, new_for_addr};
use crate::{event, sys, Interest, Registry, Token};
/// A structure representing a socket server
///
/// # Examples
///
#[cfg_attr(feature = "os-poll", doc = "```")]
#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use mio::{Events, Interest, Poll, Token};
/// use mio::net::TcpListener;
/// use std::time::Duration;
///
/// let mut listener = TcpListener::bind("127.0.0.1:34255".parse()?)?;
///
/// let mut poll = Poll::new()?;
/// let mut events = Events::with_capacity(128);
///
/// // Register the socket with `Poll`
/// poll.registry().register(&mut listener, Token(0), Interest::READABLE)?;
///
/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
///
/// // There may be a socket ready to be accepted
/// # Ok(())
/// # }
/// ```
pub struct TcpListener {
    // The wrapped std listener; `IoSource` ties it to Mio's selector so I/O
    // performed through `do_io` keeps readiness events flowing.
    inner: IoSource<net::TcpListener>,
}
impl TcpListener {
    /// Convenience method to bind a new TCP listener to the specified address
    /// to receive new connections.
    ///
    /// This function will take the following steps:
    ///
    /// 1. Create a new TCP socket.
    /// 2. Set the `SO_REUSEADDR` option on the socket on Unix.
    /// 3. Bind the socket to the specified address.
    /// 4. Calls `listen` on the socket to prepare it to receive new connections.
    #[cfg(not(target_os = "wasi"))]
    pub fn bind(addr: SocketAddr) -> io::Result<TcpListener> {
        let socket = new_for_addr(addr)?;
        // Wrap the raw socket immediately so it is closed if any later step
        // fails and `listener` is dropped.
        #[cfg(unix)]
        let listener = unsafe { TcpListener::from_raw_fd(socket) };
        #[cfg(windows)]
        let listener = unsafe { TcpListener::from_raw_socket(socket as _) };
        // On platforms with Berkeley-derived sockets, this allows to quickly
        // rebind a socket, without needing to wait for the OS to clean up the
        // previous one.
        //
        // On Windows, this allows rebinding sockets which are actively in use,
        // which allows “socket hijacking”, so we explicitly don't set it here.
        // https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
        #[cfg(not(windows))]
        set_reuseaddr(&listener.inner, true)?;
        bind(&listener.inner, addr)?;
        // Fixed backlog of 1024 pending connections.
        listen(&listener.inner, 1024)?;
        Ok(listener)
    }
    /// Creates a new `TcpListener` from a standard `net::TcpListener`.
    ///
    /// This function is intended to be used to wrap a TCP listener from the
    /// standard library in the Mio equivalent. The conversion assumes nothing
    /// about the underlying listener; it is left up to the user to set it
    /// in non-blocking mode.
    pub fn from_std(listener: net::TcpListener) -> TcpListener {
        TcpListener {
            inner: IoSource::new(listener),
        }
    }
    /// Accepts a new `TcpStream`.
    ///
    /// This may return an `Err(e)` where `e.kind()` is
    /// `io::ErrorKind::WouldBlock`. This means a stream may be ready at a later
    /// point and one should wait for an event before calling `accept` again.
    ///
    /// If an accepted stream is returned, the remote address of the peer is
    /// returned along with it.
    pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
        self.inner.do_io(|inner| {
            sys::tcp::accept(inner).map(|(stream, addr)| (TcpStream::from_std(stream), addr))
        })
    }
    /// Returns the local socket address of this listener.
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.inner.local_addr()
    }
    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.inner.set_ttl(ttl)
    }
    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
    ///
    /// [link]: #method.set_ttl
    pub fn ttl(&self) -> io::Result<u32> {
        self.inner.ttl()
    }
    /// Get the value of the `SO_ERROR` option on this socket.
    ///
    /// This will retrieve the stored error in the underlying socket, clearing
    /// the field in the process. This can be useful for checking errors between
    /// calls.
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.inner.take_error()
    }
}
// Registration simply delegates to the wrapped `IoSource`.
impl event::Source for TcpListener {
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        self.inner.register(registry, token, interests)
    }
    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        self.inner.reregister(registry, token, interests)
    }
    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        self.inner.deregister(registry)
    }
}
impl fmt::Debug for TcpListener {
    // Forward to the inner `IoSource`'s `Debug` output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}
// Raw file-descriptor / raw-socket conversions; the per-target trait names
// differ but all forward to the wrapped std listener.
#[cfg(unix)]
impl IntoRawFd for TcpListener {
    fn into_raw_fd(self) -> RawFd {
        self.inner.into_inner().into_raw_fd()
    }
}
#[cfg(unix)]
impl AsRawFd for TcpListener {
    fn as_raw_fd(&self) -> RawFd {
        self.inner.as_raw_fd()
    }
}
#[cfg(unix)]
impl FromRawFd for TcpListener {
    /// Converts a `RawFd` to a `TcpListener`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_fd(fd: RawFd) -> TcpListener {
        TcpListener::from_std(FromRawFd::from_raw_fd(fd))
    }
}
#[cfg(windows)]
impl IntoRawSocket for TcpListener {
    fn into_raw_socket(self) -> RawSocket {
        self.inner.into_inner().into_raw_socket()
    }
}
#[cfg(windows)]
impl AsRawSocket for TcpListener {
    fn as_raw_socket(&self) -> RawSocket {
        self.inner.as_raw_socket()
    }
}
#[cfg(windows)]
impl FromRawSocket for TcpListener {
    /// Converts a `RawSocket` to a `TcpListener`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_socket(socket: RawSocket) -> TcpListener {
        TcpListener::from_std(FromRawSocket::from_raw_socket(socket))
    }
}
#[cfg(target_os = "wasi")]
impl IntoRawFd for TcpListener {
    fn into_raw_fd(self) -> RawFd {
        self.inner.into_inner().into_raw_fd()
    }
}
#[cfg(target_os = "wasi")]
impl AsRawFd for TcpListener {
    fn as_raw_fd(&self) -> RawFd {
        self.inner.as_raw_fd()
    }
}
#[cfg(target_os = "wasi")]
impl FromRawFd for TcpListener {
    /// Converts a `RawFd` to a `TcpListener`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_fd(fd: RawFd) -> TcpListener {
        TcpListener::from_std(FromRawFd::from_raw_fd(fd))
    }
}

View file

@ -0,0 +1,5 @@
mod listener;
pub use self::listener::TcpListener;
mod stream;
pub use self::stream::TcpStream;

View file

@ -0,0 +1,427 @@
use std::fmt;
use std::io::{self, IoSlice, IoSliceMut, Read, Write};
use std::net::{self, Shutdown, SocketAddr};
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(target_os = "wasi")]
use std::os::wasi::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
use crate::io_source::IoSource;
#[cfg(not(target_os = "wasi"))]
use crate::sys::tcp::{connect, new_for_addr};
use crate::{event, Interest, Registry, Token};
/// A non-blocking TCP stream between a local socket and a remote socket.
///
/// The socket will be closed when the value is dropped.
///
/// # Examples
///
#[cfg_attr(feature = "os-poll", doc = "```")]
#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
/// # use std::net::{TcpListener, SocketAddr};
/// # use std::error::Error;
/// #
/// # fn main() -> Result<(), Box<dyn Error>> {
/// let address: SocketAddr = "127.0.0.1:0".parse()?;
/// let listener = TcpListener::bind(address)?;
/// use mio::{Events, Interest, Poll, Token};
/// use mio::net::TcpStream;
/// use std::time::Duration;
///
/// let mut stream = TcpStream::connect(listener.local_addr()?)?;
///
/// let mut poll = Poll::new()?;
/// let mut events = Events::with_capacity(128);
///
/// // Register the socket with `Poll`
/// poll.registry().register(&mut stream, Token(0), Interest::WRITABLE)?;
///
/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
///
/// // The socket might be ready at this point
/// # Ok(())
/// # }
/// ```
pub struct TcpStream {
    // The wrapped std stream; `IoSource` ties it to Mio's selector so I/O
    // performed through `do_io` keeps readiness events flowing.
    inner: IoSource<net::TcpStream>,
}
impl TcpStream {
    /// Create a new TCP stream and issue a non-blocking connect to the
    /// specified address.
    ///
    /// # Notes
    ///
    /// The returned `TcpStream` may not be connected (and thus usable), unlike
    /// the API found in `std::net::TcpStream`. Because Mio issues a
    /// *non-blocking* connect it will not block the thread and instead return
    /// an unconnected `TcpStream`.
    ///
    /// Ensuring the returned stream is connected is surprisingly complex when
    /// considering cross-platform support. Doing this properly should follow
    /// the steps below, an example implementation can be found
    /// [here](https://github.com/Thomasdezeeuw/heph/blob/0c4f1ab3eaf08bea1d65776528bfd6114c9f8374/src/net/tcp/stream.rs#L560-L622).
    ///
    /// 1. Call `TcpStream::connect`
    /// 2. Register the returned stream with at least [write interest].
    /// 3. Wait for a (writable) event.
    /// 4. Check `TcpStream::peer_addr`. If it returns `libc::EINPROGRESS` or
    /// `ErrorKind::NotConnected` it means the stream is not yet connected,
    /// go back to step 3. If it returns an address it means the stream is
    /// connected, go to step 5. If another error is returned something
    /// went wrong.
    /// 5. Now the stream can be used.
    ///
    /// This may return a `WouldBlock` in which case the socket connection
    /// cannot be completed immediately, it usually means there are insufficient
    /// entries in the routing cache.
    ///
    /// [write interest]: Interest::WRITABLE
    #[cfg(not(target_os = "wasi"))]
    pub fn connect(addr: SocketAddr) -> io::Result<TcpStream> {
        let socket = new_for_addr(addr)?;
        // Wrap the raw socket immediately so it is closed if `connect` below
        // fails and `stream` is dropped.
        #[cfg(unix)]
        let stream = unsafe { TcpStream::from_raw_fd(socket) };
        #[cfg(windows)]
        let stream = unsafe { TcpStream::from_raw_socket(socket as _) };
        connect(&stream.inner, addr)?;
        Ok(stream)
    }
    /// Creates a new `TcpStream` from a standard `net::TcpStream`.
    ///
    /// This function is intended to be used to wrap a TCP stream from the
    /// standard library in the Mio equivalent. The conversion assumes nothing
    /// about the underlying stream; it is left up to the user to set it in
    /// non-blocking mode.
    ///
    /// # Note
    ///
    /// The TCP stream here will not have `connect` called on it, so it
    /// should already be connected via some other means (be it manually, or
    /// the standard library).
    pub fn from_std(stream: net::TcpStream) -> TcpStream {
        TcpStream {
            inner: IoSource::new(stream),
        }
    }
    /// Returns the socket address of the remote peer of this TCP connection.
    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
        self.inner.peer_addr()
    }
    /// Returns the socket address of the local half of this TCP connection.
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.inner.local_addr()
    }
    /// Shuts down the read, write, or both halves of this connection.
    ///
    /// This function will cause all pending and future I/O on the specified
    /// portions to return immediately with an appropriate value (see the
    /// documentation of `Shutdown`).
    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
        self.inner.shutdown(how)
    }
    /// Sets the value of the `TCP_NODELAY` option on this socket.
    ///
    /// If set, this option disables the Nagle algorithm. This means that
    /// segments are always sent as soon as possible, even if there is only a
    /// small amount of data. When not set, data is buffered until there is a
    /// sufficient amount to send out, thereby avoiding the frequent sending of
    /// small packets.
    ///
    /// # Notes
    ///
    /// On Windows make sure the stream is connected before calling this method,
    /// by receiving an (writable) event. Trying to set `nodelay` on an
    /// unconnected `TcpStream` is unspecified behavior.
    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
        self.inner.set_nodelay(nodelay)
    }
    /// Gets the value of the `TCP_NODELAY` option on this socket.
    ///
    /// For more information about this option, see [`set_nodelay`][link].
    ///
    /// [link]: #method.set_nodelay
    ///
    /// # Notes
    ///
    /// On Windows make sure the stream is connected before calling this method,
    /// by receiving an (writable) event. Trying to get `nodelay` on an
    /// unconnected `TcpStream` is unspecified behavior.
    pub fn nodelay(&self) -> io::Result<bool> {
        self.inner.nodelay()
    }
    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    ///
    /// # Notes
    ///
    /// On Windows make sure the stream is connected before calling this method,
    /// by receiving an (writable) event. Trying to set `ttl` on an
    /// unconnected `TcpStream` is unspecified behavior.
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.inner.set_ttl(ttl)
    }
    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
    ///
    /// # Notes
    ///
    /// On Windows make sure the stream is connected before calling this method,
    /// by receiving an (writable) event. Trying to get `ttl` on an
    /// unconnected `TcpStream` is unspecified behavior.
    ///
    /// [link]: #method.set_ttl
    pub fn ttl(&self) -> io::Result<u32> {
        self.inner.ttl()
    }
    /// Get the value of the `SO_ERROR` option on this socket.
    ///
    /// This will retrieve the stored error in the underlying socket, clearing
    /// the field in the process. This can be useful for checking errors between
    /// calls.
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.inner.take_error()
    }
    /// Receives data on the socket from the remote address to which it is
    /// connected, without removing that data from the queue. On success,
    /// returns the number of bytes peeked.
    ///
    /// Successive calls return the same data. This is accomplished by passing
    /// `MSG_PEEK` as a flag to the underlying recv system call.
    pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.peek(buf)
    }
    /// Execute an I/O operation ensuring that the socket receives more events
    /// if it hits a [`WouldBlock`] error.
    ///
    /// # Notes
    ///
    /// This method is required to be called for **all** I/O operations to
    /// ensure the user will receive events once the socket is ready again after
    /// returning a [`WouldBlock`] error.
    ///
    /// [`WouldBlock`]: io::ErrorKind::WouldBlock
    ///
    /// # Examples
    ///
    #[cfg_attr(unix, doc = "```no_run")]
    #[cfg_attr(windows, doc = "```ignore")]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use std::io;
    /// #[cfg(unix)]
    /// use std::os::unix::io::AsRawFd;
    /// #[cfg(windows)]
    /// use std::os::windows::io::AsRawSocket;
    /// use mio::net::TcpStream;
    ///
    /// let address = "127.0.0.1:8080".parse().unwrap();
    /// let stream = TcpStream::connect(address)?;
    ///
    /// // Wait until the stream is readable...
    ///
    /// // Read from the stream using a direct libc call, of course the
    /// // `io::Read` implementation would be easier to use.
    /// let mut buf = [0; 512];
    /// let n = stream.try_io(|| {
    ///     let buf_ptr = &mut buf as *mut _ as *mut _;
    ///     #[cfg(unix)]
    ///     let res = unsafe { libc::recv(stream.as_raw_fd(), buf_ptr, buf.len(), 0) };
    ///     #[cfg(windows)]
    ///     let res = unsafe { libc::recvfrom(stream.as_raw_socket() as usize, buf_ptr, buf.len() as i32, 0, std::ptr::null_mut(), std::ptr::null_mut()) };
    ///     if res != -1 {
    ///         Ok(res as usize)
    ///     } else {
    ///         // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure
    ///         // should return `WouldBlock` error.
    ///         Err(io::Error::last_os_error())
    ///     }
    /// })?;
    /// eprintln!("read {} bytes", n);
    /// # Ok(())
    /// # }
    /// ```
    pub fn try_io<F, T>(&self, f: F) -> io::Result<T>
    where
        F: FnOnce() -> io::Result<T>,
    {
        self.inner.do_io(|_| f())
    }
}
// All `Read`/`Write` impls go through `IoSource::do_io` so a `WouldBlock`
// result keeps readiness events flowing (see `try_io`'s documentation above).
impl Read for TcpStream {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.do_io(|mut inner| inner.read(buf))
    }
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.inner.do_io(|mut inner| inner.read_vectored(bufs))
    }
}
// Also implemented for `&TcpStream`, mirroring `std::net::TcpStream`.
impl<'a> Read for &'a TcpStream {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.do_io(|mut inner| inner.read(buf))
    }
    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.inner.do_io(|mut inner| inner.read_vectored(bufs))
    }
}
impl Write for TcpStream {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.inner.do_io(|mut inner| inner.write(buf))
    }
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        self.inner.do_io(|mut inner| inner.write_vectored(bufs))
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.do_io(|mut inner| inner.flush())
    }
}
impl<'a> Write for &'a TcpStream {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.inner.do_io(|mut inner| inner.write(buf))
    }
    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        self.inner.do_io(|mut inner| inner.write_vectored(bufs))
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.do_io(|mut inner| inner.flush())
    }
}
// Registration simply delegates to the wrapped `IoSource`.
impl event::Source for TcpStream {
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        self.inner.register(registry, token, interests)
    }
    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        self.inner.reregister(registry, token, interests)
    }
    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        self.inner.deregister(registry)
    }
}
impl fmt::Debug for TcpStream {
    // Forward to the inner `IoSource`'s `Debug` output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}
// Raw file-descriptor / raw-socket conversions; the per-target trait names
// differ but all forward to the wrapped std stream.
#[cfg(unix)]
impl IntoRawFd for TcpStream {
    fn into_raw_fd(self) -> RawFd {
        self.inner.into_inner().into_raw_fd()
    }
}
#[cfg(unix)]
impl AsRawFd for TcpStream {
    fn as_raw_fd(&self) -> RawFd {
        self.inner.as_raw_fd()
    }
}
#[cfg(unix)]
impl FromRawFd for TcpStream {
    /// Converts a `RawFd` to a `TcpStream`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_fd(fd: RawFd) -> TcpStream {
        TcpStream::from_std(FromRawFd::from_raw_fd(fd))
    }
}
#[cfg(windows)]
impl IntoRawSocket for TcpStream {
    fn into_raw_socket(self) -> RawSocket {
        self.inner.into_inner().into_raw_socket()
    }
}
#[cfg(windows)]
impl AsRawSocket for TcpStream {
    fn as_raw_socket(&self) -> RawSocket {
        self.inner.as_raw_socket()
    }
}
#[cfg(windows)]
impl FromRawSocket for TcpStream {
    /// Converts a `RawSocket` to a `TcpStream`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_socket(socket: RawSocket) -> TcpStream {
        TcpStream::from_std(FromRawSocket::from_raw_socket(socket))
    }
}
#[cfg(target_os = "wasi")]
impl IntoRawFd for TcpStream {
    fn into_raw_fd(self) -> RawFd {
        self.inner.into_inner().into_raw_fd()
    }
}
#[cfg(target_os = "wasi")]
impl AsRawFd for TcpStream {
    fn as_raw_fd(&self) -> RawFd {
        self.inner.as_raw_fd()
    }
}
#[cfg(target_os = "wasi")]
impl FromRawFd for TcpStream {
    /// Converts a `RawFd` to a `TcpStream`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_fd(fd: RawFd) -> TcpStream {
        TcpStream::from_std(FromRawFd::from_raw_fd(fd))
    }
}

697
third-party/vendor/mio/src/net/udp.rs vendored Normal file
View file

@ -0,0 +1,697 @@
//! Primitives for working with UDP.
//!
//! The types provided in this module are non-blocking by default and are
//! designed to be portable across all supported Mio platforms. As long as the
//! [portability guidelines] are followed, the behavior should be identical no
//! matter the target platform.
//!
//! [portability guidelines]: ../struct.Poll.html#portability
use crate::io_source::IoSource;
use crate::{event, sys, Interest, Registry, Token};
use std::fmt;
use std::io;
use std::net;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
/// A User Datagram Protocol socket.
///
/// This is an implementation of a bound UDP socket. This supports both IPv4 and
/// IPv6 addresses, and there is no corresponding notion of a server because UDP
/// is a datagram protocol.
///
/// # Examples
///
#[cfg_attr(feature = "os-poll", doc = "```")]
#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
/// # use std::error::Error;
/// #
/// # fn main() -> Result<(), Box<dyn Error>> {
/// // An Echo program:
/// // SENDER -> sends a message.
/// // ECHOER -> listens and prints the message received.
///
/// use mio::net::UdpSocket;
/// use mio::{Events, Interest, Poll, Token};
/// use std::time::Duration;
///
/// const SENDER: Token = Token(0);
/// const ECHOER: Token = Token(1);
///
/// // This operation will fail if the address is in use, so we select different ports for each
/// // socket.
/// let mut sender_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
/// let mut echoer_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
///
/// // If we do not use connect here, SENDER and ECHOER would need to call send_to and recv_from
/// // respectively.
/// sender_socket.connect(echoer_socket.local_addr()?)?;
///
/// // We need a Poll to check if SENDER is ready to be written into, and if ECHOER is ready to be
/// // read from.
/// let mut poll = Poll::new()?;
///
/// // We register our sockets here so that we can check if they are ready to be written/read.
/// poll.registry().register(&mut sender_socket, SENDER, Interest::WRITABLE)?;
/// poll.registry().register(&mut echoer_socket, ECHOER, Interest::READABLE)?;
///
/// let msg_to_send = [9; 9];
/// let mut buffer = [0; 9];
///
/// let mut events = Events::with_capacity(128);
/// loop {
/// poll.poll(&mut events, Some(Duration::from_millis(100)))?;
/// for event in events.iter() {
/// match event.token() {
/// // Our SENDER is ready to be written into.
/// SENDER => {
/// let bytes_sent = sender_socket.send(&msg_to_send)?;
/// assert_eq!(bytes_sent, 9);
/// println!("sent {:?} -> {:?} bytes", msg_to_send, bytes_sent);
/// },
/// // Our ECHOER is ready to be read from.
/// ECHOER => {
/// let num_recv = echoer_socket.recv(&mut buffer)?;
/// println!("echo {:?} -> {:?}", buffer, num_recv);
/// buffer = [0; 9];
/// # _ = buffer; // Silence unused assignment warning.
/// # return Ok(());
/// }
/// _ => unreachable!()
/// }
/// }
/// }
/// # }
/// ```
pub struct UdpSocket {
    // The std UDP socket wrapped in an `IoSource`, which allows it to be
    // registered with a `Registry` (see the `event::Source` impl below).
    inner: IoSource<net::UdpSocket>,
}
impl UdpSocket {
    /// Creates a UDP socket from the given address.
    ///
    /// # Examples
    ///
    #[cfg_attr(feature = "os-poll", doc = "```")]
    #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// // We must bind it to an open address.
    /// let socket = match UdpSocket::bind("127.0.0.1:0".parse()?) {
    ///     Ok(new_socket) => new_socket,
    ///     Err(fail) => {
    ///         // We panic! here, but you could try to bind it again on another address.
    ///         panic!("Failed to bind socket. {:?}", fail);
    ///     }
    /// };
    ///
    /// // Our socket was created, but we should not use it before checking its readiness.
    /// # drop(socket); // Silence unused variable warning.
    /// # Ok(())
    /// # }
    /// ```
    pub fn bind(addr: SocketAddr) -> io::Result<UdpSocket> {
        sys::udp::bind(addr).map(UdpSocket::from_std)
    }
    /// Creates a new `UdpSocket` from a standard `net::UdpSocket`.
    ///
    /// This function is intended to be used to wrap a UDP socket from the
    /// standard library in the Mio equivalent. The conversion assumes nothing
    /// about the underlying socket; it is left up to the user to set it in
    /// non-blocking mode.
    pub fn from_std(socket: net::UdpSocket) -> UdpSocket {
        UdpSocket {
            inner: IoSource::new(socket),
        }
    }
    /// Returns the socket address that this socket was created from.
    ///
    /// # Examples
    ///
    // This assertion is almost, but not quite, universal. It fails on
    // shared-IP FreeBSD jails. It's hard for mio to know whether we're jailed,
    // so simply disable the test on FreeBSD.
    #[cfg_attr(all(feature = "os-poll", not(target_os = "freebsd")), doc = "```")]
    #[cfg_attr(
        any(not(feature = "os-poll"), target_os = "freebsd"),
        doc = "```ignore"
    )]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let addr = "127.0.0.1:0".parse()?;
    /// let socket = UdpSocket::bind(addr)?;
    /// assert_eq!(socket.local_addr()?.ip(), addr.ip());
    /// # Ok(())
    /// # }
    /// ```
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.inner.local_addr()
    }
    /// Returns the socket address of the remote peer this socket was connected to.
    ///
    /// # Examples
    ///
    #[cfg_attr(feature = "os-poll", doc = "```")]
    #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let addr = "127.0.0.1:0".parse()?;
    /// let peer_addr = "127.0.0.1:11100".parse()?;
    /// let socket = UdpSocket::bind(addr)?;
    /// socket.connect(peer_addr)?;
    /// assert_eq!(socket.peer_addr()?.ip(), peer_addr.ip());
    /// # Ok(())
    /// # }
    /// ```
    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
        self.inner.peer_addr()
    }
    /// Sends data on the socket to the given address. On success, returns the
    /// number of bytes written.
    ///
    /// Unlike the standard library, the target address must be a fully
    /// resolved `SocketAddr`; no name resolution is performed here.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use std::error::Error;
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
    ///
    /// // We must check if the socket is writable before calling send_to,
    /// // or we could run into a WouldBlock error.
    ///
    /// let bytes_sent = socket.send_to(&[9; 9], "127.0.0.1:11100".parse()?)?;
    /// assert_eq!(bytes_sent, 9);
    /// #
    /// # Ok(())
    /// # }
    /// ```
    pub fn send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result<usize> {
        self.inner.do_io(|inner| inner.send_to(buf, target))
    }
    /// Receives data from the socket. On success, returns the number of bytes
    /// read and the address from whence the data came.
    ///
    /// # Notes
    ///
    /// On Windows, if the data is larger than the buffer specified, the buffer
    /// is filled with the first part of the data, and recv_from returns the error
    /// WSAEMSGSIZE(10040). The excess data is lost.
    /// Make sure to always use a sufficiently large buffer to hold the
    /// maximum UDP packet size, which can be up to 65536 bytes in size.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
    ///
    /// // We must check if the socket is readable before calling recv_from,
    /// // or we could run into a WouldBlock error.
    ///
    /// let mut buf = [0; 9];
    /// let (num_recv, from_addr) = socket.recv_from(&mut buf)?;
    /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr);
    /// #
    /// # Ok(())
    /// # }
    /// ```
    pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
        self.inner.do_io(|inner| inner.recv_from(buf))
    }
    /// Receives data from the socket, without removing it from the input queue.
    /// On success, returns the number of bytes read and the address from whence
    /// the data came.
    ///
    /// # Notes
    ///
    /// On Windows, if the data is larger than the buffer specified, the buffer
    /// is filled with the first part of the data, and peek_from returns the error
    /// WSAEMSGSIZE(10040). The excess data is lost.
    /// Make sure to always use a sufficiently large buffer to hold the
    /// maximum UDP packet size, which can be up to 65536 bytes in size.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
    ///
    /// // We must check if the socket is readable before calling recv_from,
    /// // or we could run into a WouldBlock error.
    ///
    /// let mut buf = [0; 9];
    /// let (num_recv, from_addr) = socket.peek_from(&mut buf)?;
    /// println!("Received {:?} -> {:?} bytes from {:?}", buf, num_recv, from_addr);
    /// #
    /// # Ok(())
    /// # }
    /// ```
    pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
        self.inner.do_io(|inner| inner.peek_from(buf))
    }
    /// Sends data on the socket to the address previously connected via
    /// `connect()`. On success, returns the number of bytes written.
    pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
        self.inner.do_io(|inner| inner.send(buf))
    }
    /// Receives data from the socket previously connected with `connect()`.
    /// On success, returns the number of bytes read.
    ///
    /// # Notes
    ///
    /// On Windows, if the data is larger than the buffer specified, the buffer
    /// is filled with the first part of the data, and recv returns the error
    /// WSAEMSGSIZE(10040). The excess data is lost.
    /// Make sure to always use a sufficiently large buffer to hold the
    /// maximum UDP packet size, which can be up to 65536 bytes in size.
    pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.do_io(|inner| inner.recv(buf))
    }
    /// Receives data from the socket, without removing it from the input queue.
    /// On success, returns the number of bytes read.
    ///
    /// # Notes
    ///
    /// On Windows, if the data is larger than the buffer specified, the buffer
    /// is filled with the first part of the data, and peek returns the error
    /// WSAEMSGSIZE(10040). The excess data is lost.
    /// Make sure to always use a sufficiently large buffer to hold the
    /// maximum UDP packet size, which can be up to 65536 bytes in size.
    pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.do_io(|inner| inner.peek(buf))
    }
    /// Connects the UDP socket setting the default destination for `send()`
    /// and limiting packets that are read via `recv` from the address specified
    /// in `addr`.
    ///
    /// This may return a `WouldBlock` in which case the socket connection
    /// cannot be completed immediately, it usually means there are insufficient
    /// entries in the routing cache.
    pub fn connect(&self, addr: SocketAddr) -> io::Result<()> {
        self.inner.connect(addr)
    }
    /// Sets the value of the `SO_BROADCAST` option for this socket.
    ///
    /// When enabled, this socket is allowed to send packets to a broadcast
    /// address.
    ///
    /// # Examples
    ///
    #[cfg_attr(feature = "os-poll", doc = "```")]
    #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
    /// if broadcast_socket.broadcast()? == false {
    ///     broadcast_socket.set_broadcast(true)?;
    /// }
    ///
    /// assert_eq!(broadcast_socket.broadcast()?, true);
    /// #
    /// # Ok(())
    /// # }
    /// ```
    pub fn set_broadcast(&self, on: bool) -> io::Result<()> {
        self.inner.set_broadcast(on)
    }
    /// Gets the value of the `SO_BROADCAST` option for this socket.
    ///
    /// For more information about this option, see
    /// [`set_broadcast`][link].
    ///
    /// [link]: #method.set_broadcast
    ///
    /// # Examples
    ///
    #[cfg_attr(feature = "os-poll", doc = "```")]
    #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let broadcast_socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
    /// assert_eq!(broadcast_socket.broadcast()?, false);
    /// #
    /// # Ok(())
    /// # }
    /// ```
    pub fn broadcast(&self) -> io::Result<bool> {
        self.inner.broadcast()
    }
    /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
    ///
    /// If enabled, multicast packets will be looped back to the local socket.
    /// Note that this may not have any effect on IPv6 sockets.
    pub fn set_multicast_loop_v4(&self, on: bool) -> io::Result<()> {
        self.inner.set_multicast_loop_v4(on)
    }
    /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
    ///
    /// For more information about this option, see
    /// [`set_multicast_loop_v4`][link].
    ///
    /// [link]: #method.set_multicast_loop_v4
    pub fn multicast_loop_v4(&self) -> io::Result<bool> {
        self.inner.multicast_loop_v4()
    }
    /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
    ///
    /// Indicates the time-to-live value of outgoing multicast packets for
    /// this socket. The default value is 1 which means that multicast packets
    /// don't leave the local network unless explicitly requested.
    ///
    /// Note that this may not have any effect on IPv6 sockets.
    pub fn set_multicast_ttl_v4(&self, ttl: u32) -> io::Result<()> {
        self.inner.set_multicast_ttl_v4(ttl)
    }
    /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
    ///
    /// For more information about this option, see
    /// [`set_multicast_ttl_v4`][link].
    ///
    /// [link]: #method.set_multicast_ttl_v4
    pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
        self.inner.multicast_ttl_v4()
    }
    /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
    ///
    /// Controls whether this socket sees the multicast packets it sends itself.
    /// Note that this may not have any effect on IPv4 sockets.
    pub fn set_multicast_loop_v6(&self, on: bool) -> io::Result<()> {
        self.inner.set_multicast_loop_v6(on)
    }
    /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
    ///
    /// For more information about this option, see
    /// [`set_multicast_loop_v6`][link].
    ///
    /// [link]: #method.set_multicast_loop_v6
    pub fn multicast_loop_v6(&self) -> io::Result<bool> {
        self.inner.multicast_loop_v6()
    }
    /// Sets the value for the `IP_TTL` option on this socket.
    ///
    /// This value sets the time-to-live field that is used in every packet sent
    /// from this socket.
    ///
    /// # Examples
    ///
    #[cfg_attr(feature = "os-poll", doc = "```")]
    #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
    /// if socket.ttl()? < 255 {
    ///     socket.set_ttl(255)?;
    /// }
    ///
    /// assert_eq!(socket.ttl()?, 255);
    /// #
    /// # Ok(())
    /// # }
    /// ```
    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
        self.inner.set_ttl(ttl)
    }
    /// Gets the value of the `IP_TTL` option for this socket.
    ///
    /// For more information about this option, see [`set_ttl`][link].
    ///
    /// [link]: #method.set_ttl
    ///
    /// # Examples
    ///
    #[cfg_attr(feature = "os-poll", doc = "```")]
    #[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::net::UdpSocket;
    ///
    /// let socket = UdpSocket::bind("127.0.0.1:0".parse()?)?;
    /// socket.set_ttl(255)?;
    ///
    /// assert_eq!(socket.ttl()?, 255);
    /// #
    /// # Ok(())
    /// # }
    /// ```
    pub fn ttl(&self) -> io::Result<u32> {
        self.inner.ttl()
    }
    /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
    ///
    /// This function specifies a new multicast group for this socket to join.
    /// The address must be a valid multicast address, and `interface` is the
    /// address of the local interface with which the system should join the
    /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
    /// interface is chosen by the system.
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn join_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
        self.inner.join_multicast_v4(multiaddr, interface)
    }
    /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
    ///
    /// This function specifies a new multicast group for this socket to join.
    /// The address must be a valid multicast address, and `interface` is the
    /// index of the interface to join/leave (or 0 to indicate any interface).
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn join_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
        self.inner.join_multicast_v6(multiaddr, interface)
    }
    /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
    ///
    /// For more information about this option, see
    /// [`join_multicast_v4`][link].
    ///
    /// [link]: #method.join_multicast_v4
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn leave_multicast_v4(&self, multiaddr: &Ipv4Addr, interface: &Ipv4Addr) -> io::Result<()> {
        self.inner.leave_multicast_v4(multiaddr, interface)
    }
    /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
    ///
    /// For more information about this option, see
    /// [`join_multicast_v6`][link].
    ///
    /// [link]: #method.join_multicast_v6
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn leave_multicast_v6(&self, multiaddr: &Ipv6Addr, interface: u32) -> io::Result<()> {
        self.inner.leave_multicast_v6(multiaddr, interface)
    }
    /// Get the value of the `IPV6_V6ONLY` option on this socket.
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn only_v6(&self) -> io::Result<bool> {
        sys::udp::only_v6(&self.inner)
    }
    /// Get the value of the `SO_ERROR` option on this socket.
    ///
    /// This will retrieve the stored error in the underlying socket, clearing
    /// the field in the process. This can be useful for checking errors between
    /// calls.
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.inner.take_error()
    }
    /// Execute an I/O operation ensuring that the socket receives more events
    /// if it hits a [`WouldBlock`] error.
    ///
    /// # Notes
    ///
    /// This method is required to be called for **all** I/O operations to
    /// ensure the user will receive events once the socket is ready again after
    /// returning a [`WouldBlock`] error.
    ///
    /// [`WouldBlock`]: io::ErrorKind::WouldBlock
    ///
    /// # Examples
    ///
    #[cfg_attr(unix, doc = "```no_run")]
    #[cfg_attr(windows, doc = "```ignore")]
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use std::io;
    /// #[cfg(unix)]
    /// use std::os::unix::io::AsRawFd;
    /// #[cfg(windows)]
    /// use std::os::windows::io::AsRawSocket;
    /// use mio::net::UdpSocket;
    ///
    /// let address = "127.0.0.1:8080".parse().unwrap();
    /// let dgram = UdpSocket::bind(address)?;
    ///
    /// // Wait until the dgram is readable...
    ///
    /// // Read from the dgram using a direct libc call, of course the
    /// // `io::Read` implementation would be easier to use.
    /// let mut buf = [0; 512];
    /// let n = dgram.try_io(|| {
    ///     let buf_ptr = &mut buf as *mut _ as *mut _;
    ///     #[cfg(unix)]
    ///     let res = unsafe { libc::recv(dgram.as_raw_fd(), buf_ptr, buf.len(), 0) };
    ///     #[cfg(windows)]
    ///     let res = unsafe { libc::recvfrom(dgram.as_raw_socket() as usize, buf_ptr, buf.len() as i32, 0, std::ptr::null_mut(), std::ptr::null_mut()) };
    ///     if res != -1 {
    ///         Ok(res as usize)
    ///     } else {
    ///         // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure
    ///         // should return `WouldBlock` error.
    ///         Err(io::Error::last_os_error())
    ///     }
    /// })?;
    /// eprintln!("read {} bytes", n);
    /// # Ok(())
    /// # }
    /// ```
    pub fn try_io<F, T>(&self, f: F) -> io::Result<T>
    where
        F: FnOnce() -> io::Result<T>,
    {
        self.inner.do_io(|_| f())
    }
}
impl event::Source for UdpSocket {
    // All three operations simply forward to the inner `IoSource`, which owns
    // the registration bookkeeping for the wrapped socket.
    fn register(&mut self, registry: &Registry, token: Token, interests: Interest) -> io::Result<()> {
        self.inner.register(registry, token, interests)
    }

    fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest) -> io::Result<()> {
        self.inner.reregister(registry, token, interests)
    }

    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        self.inner.deregister(registry)
    }
}
impl fmt::Debug for UdpSocket {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Reuse the inner socket's `Debug` representation verbatim.
        fmt::Debug::fmt(&self.inner, f)
    }
}
#[cfg(unix)]
impl IntoRawFd for UdpSocket {
    fn into_raw_fd(self) -> RawFd {
        // Unwrap the `IoSource` first; ownership of the fd moves to the caller.
        let socket = self.inner.into_inner();
        socket.into_raw_fd()
    }
}
#[cfg(unix)]
impl AsRawFd for UdpSocket {
    fn as_raw_fd(&self) -> RawFd {
        // Borrow the descriptor; the socket stays owned by `self`.
        AsRawFd::as_raw_fd(&self.inner)
    }
}
#[cfg(unix)]
impl FromRawFd for UdpSocket {
    /// Converts a `RawFd` to a `UdpSocket`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_fd(fd: RawFd) -> UdpSocket {
        // Build the std socket from the raw descriptor, then wrap it.
        let socket = FromRawFd::from_raw_fd(fd);
        UdpSocket::from_std(socket)
    }
}
#[cfg(windows)]
impl IntoRawSocket for UdpSocket {
    fn into_raw_socket(self) -> RawSocket {
        // Unwrap the `IoSource` first; ownership of the handle moves out.
        let socket = self.inner.into_inner();
        socket.into_raw_socket()
    }
}
#[cfg(windows)]
impl AsRawSocket for UdpSocket {
    fn as_raw_socket(&self) -> RawSocket {
        // Borrow the handle; the socket stays owned by `self`.
        AsRawSocket::as_raw_socket(&self.inner)
    }
}
#[cfg(windows)]
impl FromRawSocket for UdpSocket {
    /// Converts a `RawSocket` to a `UdpSocket`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_socket(socket: RawSocket) -> UdpSocket {
        // Build the std socket from the raw handle, then wrap it.
        let std_socket = FromRawSocket::from_raw_socket(socket);
        UdpSocket::from_std(std_socket)
    }
}

View file

@ -0,0 +1,236 @@
use crate::io_source::IoSource;
use crate::{event, sys, Interest, Registry, Token};
use std::net::Shutdown;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net;
use std::path::Path;
use std::{fmt, io};
/// A Unix datagram socket.
pub struct UnixDatagram {
    // The std socket wrapped in an `IoSource`, which allows it to be
    // registered with a `Registry` (see the `event::Source` impl below).
    inner: IoSource<net::UnixDatagram>,
}
impl UnixDatagram {
    /// Creates a Unix datagram socket bound to the given path.
    pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> {
        sys::uds::datagram::bind(path.as_ref()).map(UnixDatagram::from_std)
    }
    /// Creates a new `UnixDatagram` from a standard `net::UnixDatagram`.
    ///
    /// This function is intended to be used to wrap a Unix datagram from the
    /// standard library in the Mio equivalent. The conversion assumes nothing
    /// about the underlying datagram; it is left up to the user to set it in
    /// non-blocking mode.
    pub fn from_std(socket: net::UnixDatagram) -> UnixDatagram {
        UnixDatagram {
            inner: IoSource::new(socket),
        }
    }
    /// Connects the socket to the specified address.
    ///
    /// This may return a `WouldBlock` in which case the socket connection
    /// cannot be completed immediately.
    pub fn connect<P: AsRef<Path>>(&self, path: P) -> io::Result<()> {
        self.inner.connect(path)
    }
    /// Creates a Unix datagram socket which is not bound to any address.
    pub fn unbound() -> io::Result<UnixDatagram> {
        sys::uds::datagram::unbound().map(UnixDatagram::from_std)
    }
    /// Create an unnamed pair of connected sockets.
    pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> {
        // Wrap both ends of the pair in the Mio type.
        sys::uds::datagram::pair().map(|(socket1, socket2)| {
            (
                UnixDatagram::from_std(socket1),
                UnixDatagram::from_std(socket2),
            )
        })
    }
    /// Returns the address of this socket.
    pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
        sys::uds::datagram::local_addr(&self.inner)
    }
    /// Returns the address of this socket's peer.
    ///
    /// The `connect` method will connect the socket to a peer.
    pub fn peer_addr(&self) -> io::Result<sys::SocketAddr> {
        sys::uds::datagram::peer_addr(&self.inner)
    }
    /// Receives data from the socket.
    ///
    /// On success, returns the number of bytes read and the address from
    /// whence the data came.
    pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, sys::SocketAddr)> {
        self.inner
            .do_io(|inner| sys::uds::datagram::recv_from(inner, buf))
    }
    /// Receives data from the socket.
    ///
    /// On success, returns the number of bytes read.
    pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.do_io(|inner| inner.recv(buf))
    }
    /// Sends data on the socket to the specified address.
    ///
    /// On success, returns the number of bytes written.
    pub fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize> {
        self.inner.do_io(|inner| inner.send_to(buf, path))
    }
    /// Sends data on the socket to the socket's peer.
    ///
    /// The peer address may be set by the `connect` method, and this method
    /// will return an error if the socket has not already been connected.
    ///
    /// On success, returns the number of bytes written.
    pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
        self.inner.do_io(|inner| inner.send(buf))
    }
    /// Returns the value of the `SO_ERROR` option.
    ///
    /// This retrieves (and clears) the error stored in the underlying socket.
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.inner.take_error()
    }
    /// Shut down the read, write, or both halves of this connection.
    ///
    /// This function will cause all pending and future I/O calls on the
    /// specified portions to immediately return with an appropriate value
    /// (see the documentation of `Shutdown`).
    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
        self.inner.shutdown(how)
    }
    /// Execute an I/O operation ensuring that the socket receives more events
    /// if it hits a [`WouldBlock`] error.
    ///
    /// # Notes
    ///
    /// This method is required to be called for **all** I/O operations to
    /// ensure the user will receive events once the socket is ready again after
    /// returning a [`WouldBlock`] error.
    ///
    /// [`WouldBlock`]: io::ErrorKind::WouldBlock
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::error::Error;
    /// #
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use std::io;
    /// use std::os::unix::io::AsRawFd;
    /// use mio::net::UnixDatagram;
    ///
    /// let (dgram1, dgram2) = UnixDatagram::pair()?;
    ///
    /// // Wait until the dgram is writable...
    ///
    /// // Write to the dgram using a direct libc call, of course the
    /// // `io::Write` implementation would be easier to use.
    /// let buf = b"hello";
    /// let n = dgram1.try_io(|| {
    ///     let buf_ptr = &buf as *const _ as *const _;
    ///     let res = unsafe { libc::send(dgram1.as_raw_fd(), buf_ptr, buf.len(), 0) };
    ///     if res != -1 {
    ///         Ok(res as usize)
    ///     } else {
    ///         // If EAGAIN or EWOULDBLOCK is set by libc::send, the closure
    ///         // should return `WouldBlock` error.
    ///         Err(io::Error::last_os_error())
    ///     }
    /// })?;
    /// eprintln!("write {} bytes", n);
    ///
    /// // Wait until the dgram is readable...
    ///
    /// // Read from the dgram using a direct libc call, of course the
    /// // `io::Read` implementation would be easier to use.
    /// let mut buf = [0; 512];
    /// let n = dgram2.try_io(|| {
    ///     let buf_ptr = &mut buf as *mut _ as *mut _;
    ///     let res = unsafe { libc::recv(dgram2.as_raw_fd(), buf_ptr, buf.len(), 0) };
    ///     if res != -1 {
    ///         Ok(res as usize)
    ///     } else {
    ///         // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure
    ///         // should return `WouldBlock` error.
    ///         Err(io::Error::last_os_error())
    ///     }
    /// })?;
    /// eprintln!("read {} bytes", n);
    /// # Ok(())
    /// # }
    /// ```
    pub fn try_io<F, T>(&self, f: F) -> io::Result<T>
    where
        F: FnOnce() -> io::Result<T>,
    {
        self.inner.do_io(|_| f())
    }
}
impl event::Source for UnixDatagram {
    // Registration is handled entirely by the wrapped `IoSource`.
    fn register(&mut self, registry: &Registry, token: Token, interests: Interest) -> io::Result<()> {
        self.inner.register(registry, token, interests)
    }

    fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest) -> io::Result<()> {
        self.inner.reregister(registry, token, interests)
    }

    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        self.inner.deregister(registry)
    }
}
impl fmt::Debug for UnixDatagram {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Reuse the inner socket's `Debug` representation verbatim.
        fmt::Debug::fmt(&self.inner, f)
    }
}
impl IntoRawFd for UnixDatagram {
    fn into_raw_fd(self) -> RawFd {
        // Unwrap the `IoSource` first; ownership of the fd moves to the caller.
        let socket = self.inner.into_inner();
        socket.into_raw_fd()
    }
}
impl AsRawFd for UnixDatagram {
    fn as_raw_fd(&self) -> RawFd {
        // Borrow the descriptor; the socket stays owned by `self`.
        AsRawFd::as_raw_fd(&self.inner)
    }
}
impl FromRawFd for UnixDatagram {
    /// Converts a `RawFd` to a `UnixDatagram`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram {
        // Build the std socket from the raw descriptor, then wrap it.
        let socket = FromRawFd::from_raw_fd(fd);
        UnixDatagram::from_std(socket)
    }
}

View file

@ -0,0 +1,109 @@
use crate::io_source::IoSource;
use crate::net::{SocketAddr, UnixStream};
use crate::{event, sys, Interest, Registry, Token};
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net;
use std::path::Path;
use std::{fmt, io};
/// A non-blocking Unix domain socket server.
pub struct UnixListener {
    // The std listener wrapped in an `IoSource`, which allows it to be
    // registered with a `Registry` (see the `event::Source` impl below).
    inner: IoSource<net::UnixListener>,
}
impl UnixListener {
    /// Creates a new `UnixListener` bound to the specified socket `path`.
    pub fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> {
        sys::uds::listener::bind(path.as_ref()).map(UnixListener::from_std)
    }
    /// Creates a new `UnixListener` bound to the specified socket `address`.
    pub fn bind_addr(address: &SocketAddr) -> io::Result<UnixListener> {
        sys::uds::listener::bind_addr(address).map(UnixListener::from_std)
    }
    /// Creates a new `UnixListener` from a standard `net::UnixListener`.
    ///
    /// This function is intended to be used to wrap a Unix listener from the
    /// standard library in the Mio equivalent. The conversion assumes nothing
    /// about the underlying listener; it is left up to the user to set it in
    /// non-blocking mode.
    pub fn from_std(listener: net::UnixListener) -> UnixListener {
        UnixListener {
            inner: IoSource::new(listener),
        }
    }
    /// Accepts a new incoming connection to this listener.
    ///
    /// On success, returns the accepted stream and the peer's address.
    ///
    /// The caller is responsible for ensuring that the listening socket is in
    /// non-blocking mode.
    pub fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
        sys::uds::listener::accept(&self.inner)
    }
    /// Returns the local socket address of this listener.
    pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
        sys::uds::listener::local_addr(&self.inner)
    }
    /// Returns the value of the `SO_ERROR` option.
    ///
    /// This retrieves (and clears) the error stored in the underlying socket.
    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
        self.inner.take_error()
    }
}
impl event::Source for UnixListener {
    // Registration is handled entirely by the wrapped `IoSource`.
    fn register(&mut self, registry: &Registry, token: Token, interests: Interest) -> io::Result<()> {
        self.inner.register(registry, token, interests)
    }

    fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest) -> io::Result<()> {
        self.inner.reregister(registry, token, interests)
    }

    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        self.inner.deregister(registry)
    }
}
impl fmt::Debug for UnixListener {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Reuse the inner listener's `Debug` representation verbatim.
        fmt::Debug::fmt(&self.inner, f)
    }
}
impl IntoRawFd for UnixListener {
    fn into_raw_fd(self) -> RawFd {
        // Unwrap the `IoSource` first; ownership of the fd moves to the caller.
        let listener = self.inner.into_inner();
        listener.into_raw_fd()
    }
}
impl AsRawFd for UnixListener {
    fn as_raw_fd(&self) -> RawFd {
        // Borrow the descriptor; the listener stays owned by `self`.
        AsRawFd::as_raw_fd(&self.inner)
    }
}
impl FromRawFd for UnixListener {
    /// Converts a `RawFd` to a `UnixListener`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_fd(fd: RawFd) -> UnixListener {
        // Build the std listener from the raw descriptor, then wrap it.
        let listener = FromRawFd::from_raw_fd(fd);
        UnixListener::from_std(listener)
    }
}

View file

@ -0,0 +1,10 @@
// Unix-domain socket support. Each submodule provides the Mio wrapper for the
// corresponding `std::os::unix::net` type; the wrappers are re-exported here.
mod datagram;
pub use self::datagram::UnixDatagram;
mod listener;
pub use self::listener::UnixListener;
mod stream;
pub use self::stream::UnixStream;
// The Unix socket address type comes from the platform-specific `sys` layer.
pub use crate::sys::SocketAddr;

View file

@ -0,0 +1,254 @@
use crate::io_source::IoSource;
use crate::net::SocketAddr;
use crate::{event, sys, Interest, Registry, Token};
use std::fmt;
use std::io::{self, IoSlice, IoSliceMut, Read, Write};
use std::net::Shutdown;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net;
use std::path::Path;
/// A non-blocking Unix stream socket.
pub struct UnixStream {
    // The std stream wrapped in an `IoSource`, which allows it to be
    // registered with a `Registry` via the `event::Source` impl.
    inner: IoSource<net::UnixStream>,
}
impl UnixStream {
/// Connects to the socket named by `path`.
///
/// This may return a `WouldBlock` in which case the socket connection
/// cannot be completed immediately. Usually it means the backlog is full.
pub fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> {
sys::uds::stream::connect(path.as_ref()).map(UnixStream::from_std)
}
/// Connects to the socket named by `address`.
///
/// This may return a `WouldBlock` in which case the socket connection
/// cannot be completed immediately. Usually it means the backlog is full.
pub fn connect_addr(address: &SocketAddr) -> io::Result<UnixStream> {
sys::uds::stream::connect_addr(address).map(UnixStream::from_std)
}
/// Creates a new `UnixStream` from a standard `net::UnixStream`.
///
/// This function is intended to be used to wrap a Unix stream from the
/// standard library in the Mio equivalent. The conversion assumes nothing
/// about the underlying stream; it is left up to the user to set it in
/// non-blocking mode.
///
/// # Note
///
/// The Unix stream here will not have `connect` called on it, so it
/// should already be connected via some other means (be it manually, or
/// the standard library).
pub fn from_std(stream: net::UnixStream) -> UnixStream {
UnixStream {
inner: IoSource::new(stream),
}
}
/// Creates an unnamed pair of connected sockets.
///
/// Returns two `UnixStream`s which are connected to each other.
pub fn pair() -> io::Result<(UnixStream, UnixStream)> {
sys::uds::stream::pair().map(|(stream1, stream2)| {
(UnixStream::from_std(stream1), UnixStream::from_std(stream2))
})
}
/// Returns the socket address of the local half of this connection.
pub fn local_addr(&self) -> io::Result<sys::SocketAddr> {
sys::uds::stream::local_addr(&self.inner)
}
/// Returns the socket address of the remote half of this connection.
pub fn peer_addr(&self) -> io::Result<sys::SocketAddr> {
sys::uds::stream::peer_addr(&self.inner)
}
/// Returns the value of the `SO_ERROR` option.
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
self.inner.take_error()
}
/// Shuts down the read, write, or both halves of this connection.
///
/// This function will cause all pending and future I/O calls on the
/// specified portions to immediately return with an appropriate value
/// (see the documentation of `Shutdown`).
pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
    // Calls the inner stream directly rather than via `do_io`.
    self.inner.shutdown(how)
}
/// Execute an I/O operation ensuring that the socket receives more events
/// if it hits a [`WouldBlock`] error.
///
/// # Notes
///
/// This method is required to be called for **all** I/O operations to
/// ensure the user will receive events once the socket is ready again after
/// returning a [`WouldBlock`] error.
///
/// [`WouldBlock`]: io::ErrorKind::WouldBlock
///
/// # Examples
///
/// ```
/// # use std::error::Error;
/// #
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use std::io;
/// use std::os::unix::io::AsRawFd;
/// use mio::net::UnixStream;
///
/// let (stream1, stream2) = UnixStream::pair()?;
///
/// // Wait until the stream is writable...
///
/// // Write to the stream using a direct libc call, of course the
/// // `io::Write` implementation would be easier to use.
/// let buf = b"hello";
/// let n = stream1.try_io(|| {
///     let buf_ptr = &buf as *const _ as *const _;
///     let res = unsafe { libc::send(stream1.as_raw_fd(), buf_ptr, buf.len(), 0) };
///     if res != -1 {
///         Ok(res as usize)
///     } else {
///         // If EAGAIN or EWOULDBLOCK is set by libc::send, the closure
///         // should return `WouldBlock` error.
///         Err(io::Error::last_os_error())
///     }
/// })?;
/// eprintln!("write {} bytes", n);
///
/// // Wait until the stream is readable...
///
/// // Read from the stream using a direct libc call, of course the
/// // `io::Read` implementation would be easier to use.
/// let mut buf = [0; 512];
/// let n = stream2.try_io(|| {
///     let buf_ptr = &mut buf as *mut _ as *mut _;
///     let res = unsafe { libc::recv(stream2.as_raw_fd(), buf_ptr, buf.len(), 0) };
///     if res != -1 {
///         Ok(res as usize)
///     } else {
///         // If EAGAIN or EWOULDBLOCK is set by libc::recv, the closure
///         // should return `WouldBlock` error.
///         Err(io::Error::last_os_error())
///     }
/// })?;
/// eprintln!("read {} bytes", n);
/// # Ok(())
/// # }
/// ```
pub fn try_io<F, T>(&self, f: F) -> io::Result<T>
where
    F: FnOnce() -> io::Result<T>,
{
    // `do_io` runs the closure and does the bookkeeping needed so that
    // events keep arriving after a `WouldBlock` result. The closure does
    // not need the inner stream (the caller already has the fd), hence
    // the ignored argument.
    self.inner.do_io(|_| f())
}
}
impl Read for UnixStream {
    // Both methods route through `do_io` so that readiness bookkeeping
    // happens when the underlying read would block.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.do_io(|mut stream| stream.read(buf))
    }

    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.inner.do_io(|mut stream| stream.read_vectored(bufs))
    }
}
impl<'a> Read for &'a UnixStream {
    // Shared-reference variant; identical delegation to the `IoSource`.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.do_io(|mut s| s.read(buf))
    }

    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.inner.do_io(|mut s| s.read_vectored(bufs))
    }
}
impl Write for UnixStream {
    // All writes go through `do_io` so that readiness bookkeeping happens
    // when the underlying write would block.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.inner.do_io(|mut stream| stream.write(buf))
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        self.inner.do_io(|mut stream| stream.write_vectored(bufs))
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner.do_io(|mut stream| stream.flush())
    }
}
impl<'a> Write for &'a UnixStream {
    // Shared-reference variant; identical delegation to the `IoSource`.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.inner.do_io(|mut s| s.write(buf))
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        self.inner.do_io(|mut s| s.write_vectored(bufs))
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner.do_io(|mut s| s.flush())
    }
}
impl event::Source for UnixStream {
    // All three operations are handled by the `IoSource` wrapper, which
    // tracks the fd with the selector owned by `registry`.
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        self.inner.register(registry, token, interests)
    }

    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        self.inner.reregister(registry, token, interests)
    }

    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        self.inner.deregister(registry)
    }
}
impl fmt::Debug for UnixStream {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Reuse the inner `IoSource`'s `Debug` output directly.
        self.inner.fmt(f)
    }
}
impl IntoRawFd for UnixStream {
    fn into_raw_fd(self) -> RawFd {
        // Unwrap the `IoSource` and hand ownership of the fd to the caller.
        self.inner.into_inner().into_raw_fd()
    }
}
impl AsRawFd for UnixStream {
    fn as_raw_fd(&self) -> RawFd {
        // Borrow the fd; the stream retains ownership.
        self.inner.as_raw_fd()
    }
}
impl FromRawFd for UnixStream {
    /// Converts a `RawFd` to a `UnixStream`.
    ///
    /// # Notes
    ///
    /// The caller is responsible for ensuring that the socket is in
    /// non-blocking mode.
    unsafe fn from_raw_fd(fd: RawFd) -> UnixStream {
        // Build a std `UnixStream` from the fd (taking ownership), then wrap it.
        UnixStream::from_std(FromRawFd::from_raw_fd(fd))
    }
}

743
third-party/vendor/mio/src/poll.rs vendored Normal file
View file

@ -0,0 +1,743 @@
#[cfg(all(
unix,
not(mio_unsupported_force_poll_poll),
not(any(target_os = "solaris", target_os = "vita"))
))]
use std::os::unix::io::{AsRawFd, RawFd};
#[cfg(all(debug_assertions, not(target_os = "wasi")))]
use std::sync::atomic::{AtomicBool, Ordering};
#[cfg(all(debug_assertions, not(target_os = "wasi")))]
use std::sync::Arc;
use std::time::Duration;
use std::{fmt, io};
use crate::{event, sys, Events, Interest, Token};
/// Polls for readiness events on all registered values.
///
/// `Poll` allows a program to monitor a large number of [`event::Source`]s,
/// waiting until one or more become "ready" for some class of operations; e.g.
/// reading and writing. An event source is considered ready if it is possible
/// to immediately perform a corresponding operation; e.g. [`read`] or
/// [`write`].
///
/// To use `Poll`, an `event::Source` must first be registered with the `Poll`
/// instance using the [`register`] method on its associated `Register`,
/// supplying readiness interest. The readiness interest tells `Poll` which
/// specific operations on the handle to monitor for readiness. A `Token` is
/// also passed to the [`register`] function. When `Poll` returns a readiness
/// event, it will include this token. This associates the event with the
/// event source that generated the event.
///
/// [`event::Source`]: ./event/trait.Source.html
/// [`read`]: ./net/struct.TcpStream.html#method.read
/// [`write`]: ./net/struct.TcpStream.html#method.write
/// [`register`]: struct.Registry.html#method.register
///
/// # Examples
///
/// A basic example -- establishing a `TcpStream` connection.
///
#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use mio::{Events, Poll, Interest, Token};
/// use mio::net::TcpStream;
///
/// use std::net::{self, SocketAddr};
///
/// // Bind a server socket to connect to.
/// let addr: SocketAddr = "127.0.0.1:0".parse()?;
/// let server = net::TcpListener::bind(addr)?;
///
/// // Construct a new `Poll` handle as well as the `Events` we'll store into
/// let mut poll = Poll::new()?;
/// let mut events = Events::with_capacity(1024);
///
/// // Connect the stream
/// let mut stream = TcpStream::connect(server.local_addr()?)?;
///
/// // Register the stream with `Poll`
/// poll.registry().register(&mut stream, Token(0), Interest::READABLE | Interest::WRITABLE)?;
///
/// // Wait for the socket to become ready. This has to happen in a loop to
/// // handle spurious wakeups.
/// loop {
/// poll.poll(&mut events, None)?;
///
/// for event in &events {
/// if event.token() == Token(0) && event.is_writable() {
/// // The socket connected (probably, it could still be a spurious
/// // wakeup)
/// return Ok(());
/// }
/// }
/// }
/// # }
/// ```
///
/// # Portability
///
/// Using `Poll` provides a portable interface across supported platforms as
/// long as the caller takes the following into consideration:
///
/// ### Spurious events
///
/// [`Poll::poll`] may return readiness events even if the associated
/// event source is not actually ready. Given the same code, this may
/// happen more on some platforms than others. It is important to never assume
/// that, just because a readiness event was received, that the associated
/// operation will succeed as well.
///
/// If an operation fails with [`WouldBlock`], then the caller should not treat
/// this as an error, but instead should wait until another readiness event is
/// received.
///
/// ### Draining readiness
///
/// Once a readiness event is received, the corresponding operation must be
/// performed repeatedly until it returns [`WouldBlock`]. Unless this is done,
/// there is no guarantee that another readiness event will be delivered, even
/// if further data is received for the event source.
///
/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
///
/// ### Readiness operations
///
/// The only readiness operations that are guaranteed to be present on all
/// supported platforms are [`readable`] and [`writable`]. All other readiness
/// operations may have false negatives and as such should be considered
/// **hints**. This means that if a socket is registered with [`readable`]
/// interest and either an error or close is received, a readiness event will
/// be generated for the socket, but it **may** only include `readable`
/// readiness. Also note that, given the potential for spurious events,
/// receiving a readiness event with `read_closed`, `write_closed`, or `error`
/// doesn't actually mean that a `read` on the socket will return a result
/// matching the readiness event.
///
/// In other words, portable programs that explicitly check for [`read_closed`],
/// [`write_closed`], or [`error`] readiness should be doing so as an
/// **optimization** and always be able to handle an error or close situation
/// when performing the actual read operation.
///
/// [`readable`]: ./event/struct.Event.html#method.is_readable
/// [`writable`]: ./event/struct.Event.html#method.is_writable
/// [`error`]: ./event/struct.Event.html#method.is_error
/// [`read_closed`]: ./event/struct.Event.html#method.is_read_closed
/// [`write_closed`]: ./event/struct.Event.html#method.is_write_closed
///
/// ### Registering handles
///
/// Unless otherwise noted, it should be assumed that types implementing
/// [`event::Source`] will never become ready unless they are registered with
/// `Poll`.
///
/// For example:
///
#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
/// # use std::error::Error;
/// # use std::net;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use mio::{Poll, Interest, Token};
/// use mio::net::TcpStream;
/// use std::net::SocketAddr;
/// use std::time::Duration;
/// use std::thread;
///
/// let address: SocketAddr = "127.0.0.1:0".parse()?;
/// let listener = net::TcpListener::bind(address)?;
/// let mut sock = TcpStream::connect(listener.local_addr()?)?;
///
/// thread::sleep(Duration::from_secs(1));
///
/// let poll = Poll::new()?;
///
/// // The connect is not guaranteed to have started until it is registered at
/// // this point
/// poll.registry().register(&mut sock, Token(0), Interest::READABLE | Interest::WRITABLE)?;
/// # Ok(())
/// # }
/// ```
///
/// ### Dropping `Poll`
///
/// When the `Poll` instance is dropped it may cancel in-flight operations for
/// the registered [event sources], meaning that no further events for them may
/// be received. It also means operations on the registered event sources may no
/// longer work. It is up to the user to keep the `Poll` instance alive while
/// registered event sources are being used.
///
/// [event sources]: ./event/trait.Source.html
///
/// ### Accessing raw fd/socket/handle
///
/// Mio makes it possible for many types to be converted into a raw file
/// descriptor (fd, Unix), socket (Windows) or handle (Windows). This makes it
/// possible to support more operations on the type than Mio supports, for
/// example it makes [mio-aio] possible. However accessing the raw fd is not
/// without its pitfalls.
///
/// Specifically performing I/O operations outside of Mio on these types (via
/// the raw fd) has unspecified behaviour. It could cause no more events to be
/// generated for the type even though it returned `WouldBlock` (in an operation
/// directly accessing the fd). The behaviour is OS specific and Mio can only
/// guarantee cross-platform behaviour if it can control the I/O.
///
/// [mio-aio]: https://github.com/asomers/mio-aio
///
/// *The following is **not** guaranteed, just a description of the current
/// situation!* Mio is allowed to change the following without it being considered
/// a breaking change, don't depend on this, it's just here to inform the user.
/// Currently the kqueue and epoll implementation support direct I/O operations
/// on the fd without Mio's knowledge. Windows however needs **all** I/O
/// operations to go through Mio otherwise it is not able to update its
/// internal state properly and won't generate events.
///
/// ### Polling without registering event sources
///
///
/// *The following is **not** guaranteed, just a description of the current
/// situation!* Mio is allowed to change the following without it being
/// considered a breaking change, don't depend on this, it's just here to inform
/// the user. On platforms that use epoll, kqueue or IOCP (see implementation
/// notes below) polling without previously registering [event sources] will
/// result in sleeping forever, only a process signal will be able to wake up
/// the thread.
///
/// On WASM/WASI this is different as it doesn't support process signals,
/// furthermore the WASI specification doesn't specify a behaviour in this
/// situation, thus it's up to the implementation what to do here. As an
/// example, the wasmtime runtime will return `EINVAL` in this situation, but
/// different runtimes may return different results. If you have further
/// insights or thoughts about this situation (and/or how Mio should handle it)
/// please add your comment to [pull request#1580].
///
/// [event sources]: crate::event::Source
/// [pull request#1580]: https://github.com/tokio-rs/mio/pull/1580
///
/// # Implementation notes
///
/// `Poll` is backed by the selector provided by the operating system.
///
/// | OS | Selector |
/// |---------------|-----------|
/// | Android | [epoll] |
/// | DragonFly BSD | [kqueue] |
/// | FreeBSD | [kqueue] |
/// | iOS | [kqueue] |
/// | illumos | [epoll] |
/// | Linux | [epoll] |
/// | NetBSD | [kqueue] |
/// | OpenBSD | [kqueue] |
/// | Windows | [IOCP] |
/// | macOS | [kqueue] |
///
/// On all supported platforms, socket operations are handled by using the
/// system selector. Platform specific extensions (e.g. [`SourceFd`]) allow
/// accessing other features provided by individual system selectors. For
/// example, Linux's [`signalfd`] feature can be used by registering the FD with
/// `Poll` via [`SourceFd`].
///
/// On all platforms except windows, a call to [`Poll::poll`] is mostly just a
/// direct call to the system selector. However, [IOCP] uses a completion model
/// instead of a readiness model. In this case, `Poll` must adapt the completion
/// model to Mio's API. While non-trivial, the bridge layer is still quite
/// efficient. The most expensive part being calls to `read` and `write` require
/// data to be copied into an intermediate buffer before it is passed to the
/// kernel.
///
/// [epoll]: https://man7.org/linux/man-pages/man7/epoll.7.html
/// [kqueue]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2
/// [IOCP]: https://docs.microsoft.com/en-us/windows/win32/fileio/i-o-completion-ports
/// [`signalfd`]: https://man7.org/linux/man-pages/man2/signalfd.2.html
/// [`SourceFd`]: unix/struct.SourceFd.html
/// [`Poll::poll`]: struct.Poll.html#method.poll
pub struct Poll {
    // All registration state lives in the `Registry`; `Poll` itself only
    // adds the blocking `poll` call on top of it.
    registry: Registry,
}
/// Registers I/O resources.
///
/// Independently owned copies can be created with [`Registry::try_clone`];
/// they all share the same underlying selector.
pub struct Registry {
    /// The system selector (e.g. `epoll`, `kqueue` or IOCP).
    selector: sys::Selector,
    /// Whether this selector currently has an associated waker.
    #[cfg(all(debug_assertions, not(target_os = "wasi")))]
    has_waker: Arc<AtomicBool>,
}
impl Poll {
    cfg_os_poll! {
        /// Return a new `Poll` handle.
        ///
        /// This function will make a syscall to the operating system to create
        /// the system selector. If this syscall fails, `Poll::new` will return
        /// with the error.
        ///
        /// The close-on-exec flag is set on the file descriptors used by the selector to prevent
        /// leaking it to executed processes. However, on some systems such as
        /// old Linux systems that don't support `epoll_create1` syscall it is done
        /// non-atomically, so a separate thread executing in parallel to this
        /// function may accidentally leak the file descriptor if it executes a
        /// new process before this function returns.
        ///
        /// See [struct] level docs for more details.
        ///
        /// [struct]: struct.Poll.html
        ///
        /// # Examples
        ///
        /// ```
        /// # use std::error::Error;
        /// # fn main() -> Result<(), Box<dyn Error>> {
        /// use mio::{Poll, Events};
        /// use std::time::Duration;
        ///
        /// let mut poll = match Poll::new() {
        ///     Ok(poll) => poll,
        ///     Err(e) => panic!("failed to create Poll instance; err={:?}", e),
        /// };
        ///
        /// // Create a structure to receive polled events
        /// let mut events = Events::with_capacity(1024);
        ///
        /// // Wait for events, but none will be received because no
        /// // `event::Source`s have been registered with this `Poll` instance.
        /// poll.poll(&mut events, Some(Duration::from_millis(500)))?;
        /// assert!(events.is_empty());
        /// # Ok(())
        /// # }
        /// ```
        pub fn new() -> io::Result<Poll> {
            sys::Selector::new().map(|selector| Poll {
                registry: Registry {
                    selector,
                    #[cfg(all(debug_assertions, not(target_os = "wasi")))]
                    has_waker: Arc::new(AtomicBool::new(false)),
                },
            })
        }
    }

    /// Create a separate `Registry` which can be used to register
    /// `event::Source`s.
    pub fn registry(&self) -> &Registry {
        &self.registry
    }

    /// Wait for readiness events
    ///
    /// Blocks the current thread and waits for readiness events for any of the
    /// [`event::Source`]s that have been registered with this `Poll` instance.
    /// The function will block until either at least one readiness event has
    /// been received or `timeout` has elapsed. A `timeout` of `None` means that
    /// `poll` will block until a readiness event has been received.
    ///
    /// The supplied `events` will be cleared and newly received readiness events
    /// will be pushed onto the end. At most `events.capacity()` events will be
    /// returned. If there are further pending readiness events, they will be
    /// returned on the next call to `poll`.
    ///
    /// A single call to `poll` may result in multiple readiness events being
    /// returned for a single event source. For example, if a TCP socket becomes
    /// both readable and writable, it may be possible for a single readiness
    /// event to be returned with both [`readable`] and [`writable`] readiness
    /// **OR** two separate events may be returned, one with [`readable`] set
    /// and one with [`writable`] set.
    ///
    /// Note that the `timeout` will be rounded up to the system clock
    /// granularity (usually 1ms), and kernel scheduling delays mean that
    /// the blocking interval may be overrun by a small amount.
    ///
    /// See the [struct] level documentation for a higher level discussion of
    /// polling.
    ///
    /// [`event::Source`]: ./event/trait.Source.html
    /// [`readable`]: struct.Interest.html#associatedconstant.READABLE
    /// [`writable`]: struct.Interest.html#associatedconstant.WRITABLE
    /// [struct]: struct.Poll.html
    /// [`iter`]: ./event/struct.Events.html#method.iter
    ///
    /// # Notes
    ///
    /// This returns any errors without attempting to retry, previous versions
    /// of Mio would automatically retry the poll call if it was interrupted
    /// (if `EINTR` was returned).
    ///
    /// Currently if the `timeout` elapses without any readiness events
    /// triggering this will return `Ok(())`. However we're not guaranteeing
    /// this behaviour as this depends on the OS.
    ///
    /// # Examples
    ///
    /// A basic example -- establishing a `TcpStream` connection.
    ///
    #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
    #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
    /// # use std::error::Error;
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::{Events, Poll, Interest, Token};
    /// use mio::net::TcpStream;
    ///
    /// use std::net::{TcpListener, SocketAddr};
    /// use std::thread;
    ///
    /// // Bind a server socket to connect to.
    /// let addr: SocketAddr = "127.0.0.1:0".parse()?;
    /// let server = TcpListener::bind(addr)?;
    /// let addr = server.local_addr()?.clone();
    ///
    /// // Spawn a thread to accept the socket
    /// thread::spawn(move || {
    ///     let _ = server.accept();
    /// });
    ///
    /// // Construct a new `Poll` handle as well as the `Events` we'll store into
    /// let mut poll = Poll::new()?;
    /// let mut events = Events::with_capacity(1024);
    ///
    /// // Connect the stream
    /// let mut stream = TcpStream::connect(addr)?;
    ///
    /// // Register the stream with `Poll`
    /// poll.registry().register(
    ///     &mut stream,
    ///     Token(0),
    ///     Interest::READABLE | Interest::WRITABLE)?;
    ///
    /// // Wait for the socket to become ready. This has to happen in a loop to
    /// // handle spurious wakeups.
    /// loop {
    ///     poll.poll(&mut events, None)?;
    ///
    ///     for event in &events {
    ///         if event.token() == Token(0) && event.is_writable() {
    ///             // The socket connected (probably, it could still be a spurious
    ///             // wakeup)
    ///             return Ok(());
    ///         }
    ///     }
    /// }
    /// # }
    /// ```
    ///
    /// [struct]: #
    pub fn poll(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
        // Delegate directly to the system selector.
        self.registry.selector.select(events.sys(), timeout)
    }
}
// Only available on selectors that are actually backed by a file descriptor
// (not the user-space `poll` fallback, Solaris, or Vita).
#[cfg(all(
    unix,
    not(mio_unsupported_force_poll_poll),
    not(any(target_os = "solaris", target_os = "vita"))
))]
impl AsRawFd for Poll {
    fn as_raw_fd(&self) -> RawFd {
        self.registry.as_raw_fd()
    }
}
impl fmt::Debug for Poll {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Deliberately prints just the type name; no internal state is exposed.
        fmt.debug_struct("Poll").finish()
    }
}
impl Registry {
    /// Register an [`event::Source`] with the `Poll` instance.
    ///
    /// Once registered, the `Poll` instance will monitor the event source for
    /// readiness state changes. When it notices a state change, it will return
    /// a readiness event for the handle the next time [`poll`] is called.
    ///
    /// See [`Poll`] docs for a high level overview.
    ///
    /// # Arguments
    ///
    /// `source: &mut S: event::Source`: This is the source of events that the
    /// `Poll` instance should monitor for readiness state changes.
    ///
    /// `token: Token`: The caller picks a token to associate with the socket.
    /// When [`poll`] returns an event for the handle, this token is included.
    /// This allows the caller to map the event to its source. The token
    /// associated with the `event::Source` can be changed at any time by
    /// calling [`reregister`].
    ///
    /// See documentation on [`Token`] for an example showing how to pick
    /// [`Token`] values.
    ///
    /// `interest: Interest`: Specifies which operations `Poll` should monitor
    /// for readiness. `Poll` will only return readiness events for operations
    /// specified by this argument.
    ///
    /// If a socket is registered with readable interest and the socket becomes
    /// writable, no event will be returned from [`poll`].
    ///
    /// The readiness interest for an `event::Source` can be changed at any time
    /// by calling [`reregister`].
    ///
    /// # Notes
    ///
    /// Callers must ensure that if a source being registered with a `Poll`
    /// instance was previously registered with that `Poll` instance, then a
    /// call to [`deregister`] has already occurred. Consecutive calls to
    /// `register` are unspecified behavior.
    ///
    /// Unless otherwise specified, the caller should assume that once an event
    /// source is registered with a `Poll` instance, it is bound to that `Poll`
    /// instance for the lifetime of the event source. This remains true even
    /// if the event source is deregistered from the poll instance using
    /// [`deregister`].
    ///
    /// [`event::Source`]: ./event/trait.Source.html
    /// [`poll`]: struct.Poll.html#method.poll
    /// [`reregister`]: struct.Registry.html#method.reregister
    /// [`deregister`]: struct.Registry.html#method.deregister
    /// [`Token`]: struct.Token.html
    ///
    /// # Examples
    ///
    #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
    #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
    /// # use std::error::Error;
    /// # use std::net;
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::{Events, Poll, Interest, Token};
    /// use mio::net::TcpStream;
    /// use std::net::SocketAddr;
    /// use std::time::{Duration, Instant};
    ///
    /// let mut poll = Poll::new()?;
    ///
    /// let address: SocketAddr = "127.0.0.1:0".parse()?;
    /// let listener = net::TcpListener::bind(address)?;
    /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
    ///
    /// // Register the socket with `poll`
    /// poll.registry().register(
    ///     &mut socket,
    ///     Token(0),
    ///     Interest::READABLE | Interest::WRITABLE)?;
    ///
    /// let mut events = Events::with_capacity(1024);
    /// let start = Instant::now();
    /// let timeout = Duration::from_millis(500);
    ///
    /// loop {
    ///     let elapsed = start.elapsed();
    ///
    ///     if elapsed >= timeout {
    ///         // Connection timed out
    ///         return Ok(());
    ///     }
    ///
    ///     let remaining = timeout - elapsed;
    ///     poll.poll(&mut events, Some(remaining))?;
    ///
    ///     for event in &events {
    ///         if event.token() == Token(0) {
    ///             // Something (probably) happened on the socket.
    ///             return Ok(());
    ///         }
    ///     }
    /// }
    /// # }
    /// ```
    pub fn register<S>(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()>
    where
        S: event::Source + ?Sized,
    {
        trace!(
            "registering event source with poller: token={:?}, interests={:?}",
            token,
            interests
        );
        // The source itself decides how to register (e.g. which fd to hand
        // to the selector), so the call is inverted: source -> registry.
        source.register(self, token, interests)
    }

    /// Re-register an [`event::Source`] with the `Poll` instance.
    ///
    /// Re-registering an event source allows changing the details of the
    /// registration. Specifically, it allows updating the associated `token`
    /// and `interests` specified in previous `register` and `reregister` calls.
    ///
    /// The `reregister` arguments fully override the previous values. In other
    /// words, if a socket is registered with [`readable`] interest and the call
    /// to `reregister` specifies [`writable`], then read interest is no longer
    /// requested for the handle.
    ///
    /// The event source must have previously been registered with this instance
    /// of `Poll`, otherwise the behavior is unspecified.
    ///
    /// See the [`register`] documentation for details about the function
    /// arguments and see the [`struct`] docs for a high level overview of
    /// polling.
    ///
    /// # Examples
    ///
    #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
    #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
    /// # use std::error::Error;
    /// # use std::net;
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::{Poll, Interest, Token};
    /// use mio::net::TcpStream;
    /// use std::net::SocketAddr;
    ///
    /// let poll = Poll::new()?;
    ///
    /// let address: SocketAddr = "127.0.0.1:0".parse()?;
    /// let listener = net::TcpListener::bind(address)?;
    /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
    ///
    /// // Register the socket with `poll`, requesting readable
    /// poll.registry().register(
    ///     &mut socket,
    ///     Token(0),
    ///     Interest::READABLE)?;
    ///
    /// // Reregister the socket specifying write interest instead. Even though
    /// // the token is the same it must be specified.
    /// poll.registry().reregister(
    ///     &mut socket,
    ///     Token(0),
    ///     Interest::WRITABLE)?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`event::Source`]: ./event/trait.Source.html
    /// [`struct`]: struct.Poll.html
    /// [`register`]: struct.Registry.html#method.register
    /// [`readable`]: ./event/struct.Event.html#is_readable
    /// [`writable`]: ./event/struct.Event.html#is_writable
    pub fn reregister<S>(&self, source: &mut S, token: Token, interests: Interest) -> io::Result<()>
    where
        S: event::Source + ?Sized,
    {
        trace!(
            "reregistering event source with poller: token={:?}, interests={:?}",
            token,
            interests
        );
        source.reregister(self, token, interests)
    }

    /// Deregister an [`event::Source`] with the `Poll` instance.
    ///
    /// When an event source is deregistered, the `Poll` instance will no longer
    /// monitor it for readiness state changes. Deregistering clears up any
    /// internal resources needed to track the handle. After an explicit call
    /// to this method completes, it is guaranteed that the token previously
    /// registered to this handle will not be returned by a future poll, so long
    /// as a happens-before relationship is established between this call and
    /// the poll.
    ///
    /// The event source must have previously been registered with this instance
    /// of `Poll`, otherwise the behavior is unspecified.
    ///
    /// A handle can be passed back to `register` after it has been
    /// deregistered; however, it must be passed back to the **same** `Poll`
    /// instance, otherwise the behavior is unspecified.
    ///
    /// # Examples
    ///
    #[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
    #[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
    /// # use std::error::Error;
    /// # use std::net;
    /// # fn main() -> Result<(), Box<dyn Error>> {
    /// use mio::{Events, Poll, Interest, Token};
    /// use mio::net::TcpStream;
    /// use std::net::SocketAddr;
    /// use std::time::Duration;
    ///
    /// let mut poll = Poll::new()?;
    ///
    /// let address: SocketAddr = "127.0.0.1:0".parse()?;
    /// let listener = net::TcpListener::bind(address)?;
    /// let mut socket = TcpStream::connect(listener.local_addr()?)?;
    ///
    /// // Register the socket with `poll`
    /// poll.registry().register(
    ///     &mut socket,
    ///     Token(0),
    ///     Interest::READABLE)?;
    ///
    /// poll.registry().deregister(&mut socket)?;
    ///
    /// let mut events = Events::with_capacity(1024);
    ///
    /// // Set a timeout because this poll should never receive any events.
    /// poll.poll(&mut events, Some(Duration::from_secs(1)))?;
    /// assert!(events.is_empty());
    /// # Ok(())
    /// # }
    /// ```
    pub fn deregister<S>(&self, source: &mut S) -> io::Result<()>
    where
        S: event::Source + ?Sized,
    {
        trace!("deregistering event source from poller");
        source.deregister(self)
    }

    /// Creates a new independently owned `Registry`.
    ///
    /// Event sources registered with this `Registry` will be registered with
    /// the original `Registry` and `Poll` instance.
    pub fn try_clone(&self) -> io::Result<Registry> {
        self.selector.try_clone().map(|selector| Registry {
            selector,
            // Clones share the waker flag so the single-waker check below
            // holds across all copies.
            #[cfg(all(debug_assertions, not(target_os = "wasi")))]
            has_waker: Arc::clone(&self.has_waker),
        })
    }

    /// Internal check to ensure only a single `Waker` is active per [`Poll`]
    /// instance.
    #[cfg(all(debug_assertions, not(target_os = "wasi")))]
    pub(crate) fn register_waker(&self) {
        assert!(
            !self.has_waker.swap(true, Ordering::AcqRel),
            "Only a single `Waker` can be active per `Poll` instance"
        );
    }

    /// Get access to the `sys::Selector`.
    #[cfg(any(not(target_os = "wasi"), feature = "net"))]
    pub(crate) fn selector(&self) -> &sys::Selector {
        &self.selector
    }
}
impl fmt::Debug for Registry {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Deliberately prints just the type name; no internal state is exposed.
        fmt.debug_struct("Registry").finish()
    }
}
// Only available on selectors that are actually backed by a file descriptor
// (not the user-space `poll` fallback, Solaris, or Vita).
#[cfg(all(
    unix,
    not(mio_unsupported_force_poll_poll),
    not(any(target_os = "solaris", target_os = "vita"))
))]
impl AsRawFd for Registry {
    fn as_raw_fd(&self) -> RawFd {
        self.selector.as_raw_fd()
    }
}
cfg_os_poll! {
    #[cfg(all(
        unix,
        not(mio_unsupported_force_poll_poll),
        not(any(target_os = "solaris", target_os = "vita")),
    ))]
    #[test]
    pub fn as_raw_fd() {
        // Smoke test: a freshly created `Poll` must expose a positive fd.
        let poll = Poll::new().unwrap();
        assert!(poll.as_raw_fd() > 0);
    }
}

88
third-party/vendor/mio/src/sys/mod.rs vendored Normal file
View file

@ -0,0 +1,88 @@
//! Module with system specific types.
//!
//! Required types:
//!
//! * `Event`: a type alias for the system specific event, e.g. `kevent` or
//! `epoll_event`.
//! * `event`: a module with various helper functions for `Event`, see
//! [`crate::event::Event`] for the required functions.
//! * `Events`: collection of `Event`s, see [`crate::Events`].
//! * `IoSourceState`: state for the `IoSource` type.
//! * `Selector`: selector used to register event sources and poll for events,
//! see [`crate::Poll`] and [`crate::Registry`] for required
//! methods.
//! * `tcp` and `udp` modules: see the [`crate::net`] module.
//! * `Waker`: see [`crate::Waker`].
cfg_os_poll! {
    // Generates a newtype wrapper plus a `fmt::Debug` impl that prints the
    // names of all set flags separated by `|`, e.g. `EPOLLIN|EPOLLOUT`, or
    // `(empty)` when no flag matches. `$test` is a predicate
    // `fn(&$event_type, &flag) -> bool`; each flag may carry its own
    // `#[cfg]` attribute so platform-specific flags compile out cleanly.
    macro_rules! debug_detail {
        (
            $type: ident ($event_type: ty), $test: path,
            $($(#[$target: meta])* $libc: ident :: $flag: ident),+ $(,)*
        ) => {
            struct $type($event_type);
            impl fmt::Debug for $type {
                fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                    let mut written_one = false;
                    $(
                        $(#[$target])*
                        #[allow(clippy::bad_bit_mask)] // Apparently some flags are zero.
                        {
                            // Windows doesn't use `libc` but the `afd` module.
                            if $test(&self.0, &$libc :: $flag) {
                                if !written_one {
                                    write!(f, "{}", stringify!($flag))?;
                                    written_one = true;
                                } else {
                                    write!(f, "|{}", stringify!($flag))?;
                                }
                            }
                        }
                    )+
                    if !written_one {
                        write!(f, "(empty)")
                    } else {
                        Ok(())
                    }
                }
            }
        };
    }
}
// Select the platform backend: exactly one of `unix`, `windows` or `wasi`
// provides the real implementations when the `os-poll` feature is enabled.
#[cfg(unix)]
cfg_os_poll! {
    mod unix;
    #[allow(unused_imports)]
    pub use self::unix::*;
}
#[cfg(windows)]
cfg_os_poll! {
    mod windows;
    pub use self::windows::*;
}
#[cfg(target_os = "wasi")]
cfg_os_poll! {
    mod wasi;
    pub(crate) use self::wasi::*;
}
// Without `os-poll` fall back to the `shell` stubs (which panic at run
// time), re-exporting only the few types that must still name-resolve.
cfg_not_os_poll! {
    mod shell;
    pub(crate) use self::shell::*;
    #[cfg(unix)]
    cfg_any_os_ext! {
        mod unix;
        #[cfg(feature = "os-ext")]
        pub use self::unix::SourceFd;
    }
    #[cfg(unix)]
    cfg_net! {
        pub use self::unix::SocketAddr;
    }
}

View file

@ -0,0 +1,101 @@
// Body of every `shell` stub: compiling without `os-poll` is allowed so the
// crate type-checks, but actually *running* any of these is a programmer
// error, hence the unconditional panic.
macro_rules! os_required {
    () => {
        panic!("mio must be compiled with `os-poll` to run.")
    };
}
// Stub selector/waker/net modules mirroring the layout of the real
// platform backends so the rest of the crate compiles unchanged.
mod selector;
pub(crate) use self::selector::{event, Event, Events, Selector};
#[cfg(not(target_os = "wasi"))]
mod waker;
#[cfg(not(target_os = "wasi"))]
pub(crate) use self::waker::Waker;
cfg_net! {
    pub(crate) mod tcp;
    pub(crate) mod udp;
    #[cfg(unix)]
    pub(crate) mod uds;
}
cfg_io_source! {
    use std::io;
    #[cfg(windows)]
    use std::os::windows::io::RawSocket;
    #[cfg(unix)]
    use std::os::unix::io::RawFd;
    #[cfg(any(windows, unix))]
    use crate::{Registry, Token, Interest};
    // Zero-sized stand-in for the real per-source state.
    pub(crate) struct IoSourceState;
    impl IoSourceState {
        pub fn new() -> IoSourceState {
            IoSourceState
        }
        // `do_io` is the only method that works without `os-poll`: it just
        // forwards to the supplied closure.
        pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
        where
            F: FnOnce(&T) -> io::Result<R>,
        {
            // We don't hold state, so we can just call the function and
            // return.
            f(io)
        }
    }
    // (De)registration stubs matching the Unix signatures (raw fd based);
    // all panic via `os_required!`.
    #[cfg(unix)]
    impl IoSourceState {
        pub fn register(
            &mut self,
            _: &Registry,
            _: Token,
            _: Interest,
            _: RawFd,
        ) -> io::Result<()> {
            os_required!()
        }
        pub fn reregister(
            &mut self,
            _: &Registry,
            _: Token,
            _: Interest,
            _: RawFd,
        ) -> io::Result<()> {
            os_required!()
        }
        pub fn deregister(&mut self, _: &Registry, _: RawFd) -> io::Result<()> {
            os_required!()
        }
    }
    // Windows counterparts — note the real Windows backend keeps the socket
    // in its state, so `reregister`/`deregister` take fewer arguments.
    #[cfg(windows)]
    impl IoSourceState {
        pub fn register(
            &mut self,
            _: &Registry,
            _: Token,
            _: Interest,
            _: RawSocket,
        ) -> io::Result<()> {
            os_required!()
        }
        pub fn reregister(
            &mut self,
            _: &Registry,
            _: Token,
            _: Interest,
        ) -> io::Result<()> {
            os_required!()
        }
        pub fn deregister(&mut self) -> io::Result<()> {
            os_required!()
        }
    }
}

View file

@ -0,0 +1,122 @@
use std::io;
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, RawFd};
use std::time::Duration;
// Placeholder event types: the shell backend never produces events, so any
// sized type works here.
pub type Event = usize;
pub type Events = Vec<Event>;
#[derive(Debug)]
pub struct Selector {}
impl Selector {
    pub fn try_clone(&self) -> io::Result<Selector> {
        os_required!();
    }
    pub fn select(&self, _: &mut Events, _: Option<Duration>) -> io::Result<()> {
        os_required!();
    }
}
// Registration stubs. Unix registers by `RawFd`, WASI by `wasi::Fd`; both
// sets of signatures mirror the real backends so callers compile.
#[cfg(unix)]
cfg_any_os_ext! {
    use crate::{Interest, Token};
    impl Selector {
        pub fn register(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> {
            os_required!();
        }
        pub fn reregister(&self, _: RawFd, _: Token, _: Interest) -> io::Result<()> {
            os_required!();
        }
        pub fn deregister(&self, _: RawFd) -> io::Result<()> {
            os_required!();
        }
    }
}
#[cfg(target_os = "wasi")]
cfg_any_os_ext! {
    use crate::{Interest, Token};
    impl Selector {
        pub fn register(&self, _: wasi::Fd, _: Token, _: Interest) -> io::Result<()> {
            os_required!();
        }
        pub fn reregister(&self, _: wasi::Fd, _: Token, _: Interest) -> io::Result<()> {
            os_required!();
        }
        pub fn deregister(&self, _: wasi::Fd) -> io::Result<()> {
            os_required!();
        }
    }
}
cfg_io_source! {
    // `id` backs the debug-only association check between sources and
    // their registry; unreachable without `os-poll`.
    #[cfg(debug_assertions)]
    impl Selector {
        pub fn id(&self) -> usize {
            os_required!();
        }
    }
}
#[cfg(unix)]
impl AsRawFd for Selector {
    fn as_raw_fd(&self) -> RawFd {
        os_required!()
    }
}
// Stub accessors for the placeholder `Event` type; signatures must match
// the real `sys::event` modules (see `crate::event::Event`).
#[allow(clippy::trivially_copy_pass_by_ref)]
pub mod event {
    use crate::sys::Event;
    use crate::Token;
    use std::fmt;
    pub fn token(_: &Event) -> Token {
        os_required!();
    }
    pub fn is_readable(_: &Event) -> bool {
        os_required!();
    }
    pub fn is_writable(_: &Event) -> bool {
        os_required!();
    }
    pub fn is_error(_: &Event) -> bool {
        os_required!();
    }
    pub fn is_read_closed(_: &Event) -> bool {
        os_required!();
    }
    pub fn is_write_closed(_: &Event) -> bool {
        os_required!();
    }
    pub fn is_priority(_: &Event) -> bool {
        os_required!();
    }
    pub fn is_aio(_: &Event) -> bool {
        os_required!();
    }
    pub fn is_lio(_: &Event) -> bool {
        os_required!();
    }
    pub fn debug_details(_: &mut fmt::Formatter<'_>, _: &Event) -> fmt::Result {
        os_required!();
    }
}

View file

@ -0,0 +1,31 @@
use std::io;
use std::net::{self, SocketAddr};
// TCP helper stubs; cfg gates mirror the real backends (WASI has no
// socket creation/bind/connect/listen, and `SO_REUSEADDR` is Unix-only).
#[cfg(not(target_os = "wasi"))]
pub(crate) fn new_for_addr(_: SocketAddr) -> io::Result<i32> {
    os_required!();
}
#[cfg(not(target_os = "wasi"))]
pub(crate) fn bind(_: &net::TcpListener, _: SocketAddr) -> io::Result<()> {
    os_required!();
}
#[cfg(not(target_os = "wasi"))]
pub(crate) fn connect(_: &net::TcpStream, _: SocketAddr) -> io::Result<()> {
    os_required!();
}
#[cfg(not(target_os = "wasi"))]
pub(crate) fn listen(_: &net::TcpListener, _: u32) -> io::Result<()> {
    os_required!();
}
#[cfg(unix)]
pub(crate) fn set_reuseaddr(_: &net::TcpListener, _: bool) -> io::Result<()> {
    os_required!();
}
pub(crate) fn accept(_: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
    os_required!();
}

View file

@ -0,0 +1,11 @@
#![cfg(not(target_os = "wasi"))]
use std::io;
use std::net::{self, SocketAddr};
// UDP stubs.
// NOTE(review): `bind` is `pub` while the sibling stubs here are
// `pub(crate)` — presumably this mirrors the real backend's visibility;
// confirm against `sys::unix::udp` before changing.
pub fn bind(_: SocketAddr) -> io::Result<net::UdpSocket> {
    os_required!()
}
pub(crate) fn only_v6(_: &net::UdpSocket) -> io::Result<bool> {
    os_required!()
}

View file

@ -0,0 +1,83 @@
// Unix-domain-socket stubs, grouped like the real backend: one submodule
// per socket flavor (datagram, listener, stream).
pub(crate) mod datagram {
    use crate::net::SocketAddr;
    use std::io;
    use std::os::unix::net;
    use std::path::Path;
    pub(crate) fn bind(_: &Path) -> io::Result<net::UnixDatagram> {
        os_required!()
    }
    pub(crate) fn unbound() -> io::Result<net::UnixDatagram> {
        os_required!()
    }
    pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> {
        os_required!()
    }
    pub(crate) fn local_addr(_: &net::UnixDatagram) -> io::Result<SocketAddr> {
        os_required!()
    }
    pub(crate) fn peer_addr(_: &net::UnixDatagram) -> io::Result<SocketAddr> {
        os_required!()
    }
    pub(crate) fn recv_from(
        _: &net::UnixDatagram,
        _: &mut [u8],
    ) -> io::Result<(usize, SocketAddr)> {
        os_required!()
    }
}
pub(crate) mod listener {
    use crate::net::{SocketAddr, UnixStream};
    use std::io;
    use std::os::unix::net;
    use std::path::Path;
    pub(crate) fn bind(_: &Path) -> io::Result<net::UnixListener> {
        os_required!()
    }
    pub(crate) fn bind_addr(_: &SocketAddr) -> io::Result<net::UnixListener> {
        os_required!()
    }
    pub(crate) fn accept(_: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> {
        os_required!()
    }
    pub(crate) fn local_addr(_: &net::UnixListener) -> io::Result<SocketAddr> {
        os_required!()
    }
}
pub(crate) mod stream {
    use crate::net::SocketAddr;
    use std::io;
    use std::os::unix::net;
    use std::path::Path;
    pub(crate) fn connect(_: &Path) -> io::Result<net::UnixStream> {
        os_required!()
    }
    pub(crate) fn connect_addr(_: &SocketAddr) -> io::Result<net::UnixStream> {
        os_required!()
    }
    pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> {
        os_required!()
    }
    pub(crate) fn local_addr(_: &net::UnixStream) -> io::Result<SocketAddr> {
        os_required!()
    }
    pub(crate) fn peer_addr(_: &net::UnixStream) -> io::Result<SocketAddr> {
        os_required!()
    }
}

View file

@ -0,0 +1,16 @@
use crate::sys::Selector;
use crate::Token;
use std::io;
// Stub `Waker`: fieldless, matching the real backends' constructor and
// `wake` signatures.
#[derive(Debug)]
pub struct Waker {}
impl Waker {
    pub fn new(_: &Selector, _: Token) -> io::Result<Waker> {
        os_required!();
    }
    pub fn wake(&self) -> io::Result<()> {
        os_required!();
    }
}

View file

@ -0,0 +1,126 @@
/// Helper macro to execute a system call that returns an `io::Result`.
//
// Macro must be defined before any modules that uses them.
//
// Expands `syscall!(foo(a, b))` into an `unsafe` call to `libc::foo(a, b)`,
// translating the Unix convention of `-1` + `errno` into
// `Err(io::Error::last_os_error())` and any other return value into `Ok`.
#[allow(unused_macros)]
macro_rules! syscall {
    ($fn: ident ( $($arg: expr),* $(,)* ) ) => {{
        let res = unsafe { libc::$fn($($arg, )*) };
        if res == -1 {
            Err(std::io::Error::last_os_error())
        } else {
            Ok(res)
        }
    }};
}
cfg_os_poll! {
    // Real Unix backend wiring: selector (epoll/kqueue/poll), waker,
    // networking modules, and the per-source `IoSourceState`.
    mod selector;
    pub(crate) use self::selector::{event, Event, Events, Selector};
    mod sourcefd;
    #[cfg(feature = "os-ext")]
    pub use self::sourcefd::SourceFd;
    mod waker;
    pub(crate) use self::waker::Waker;
    cfg_net! {
        mod net;
        pub(crate) mod tcp;
        pub(crate) mod udp;
        pub(crate) mod uds;
        pub use self::uds::SocketAddr;
    }
    cfg_io_source! {
        // Both `kqueue` and `epoll` don't need to hold any user space state.
        #[cfg(not(any(mio_unsupported_force_poll_poll, target_os = "solaris", target_os = "vita")))]
        mod stateless_io_source {
            use std::io;
            use std::os::unix::io::RawFd;
            use crate::Registry;
            use crate::Token;
            use crate::Interest;
            pub(crate) struct IoSourceState;
            impl IoSourceState {
                pub fn new() -> IoSourceState {
                    IoSourceState
                }
                pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
                where
                    F: FnOnce(&T) -> io::Result<R>,
                {
                    // We don't hold state, so we can just call the function and
                    // return.
                    f(io)
                }
                pub fn register(
                    &mut self,
                    registry: &Registry,
                    token: Token,
                    interests: Interest,
                    fd: RawFd,
                ) -> io::Result<()> {
                    // Pass through, we don't have any state
                    registry.selector().register(fd, token, interests)
                }
                pub fn reregister(
                    &mut self,
                    registry: &Registry,
                    token: Token,
                    interests: Interest,
                    fd: RawFd,
                ) -> io::Result<()> {
                    // Pass through, we don't have any state
                    registry.selector().reregister(fd, token, interests)
                }
                pub fn deregister(&mut self, registry: &Registry, fd: RawFd) -> io::Result<()> {
                    // Pass through, we don't have any state
                    registry.selector().deregister(fd)
                }
            }
        }
        // poll(2)-based selectors need per-source state, which lives in the
        // selector module instead.
        #[cfg(not(any(mio_unsupported_force_poll_poll, target_os = "solaris",target_os = "vita")))]
        pub(crate) use self::stateless_io_source::IoSourceState;
        #[cfg(any(mio_unsupported_force_poll_poll, target_os = "solaris", target_os = "vita"))]
        pub(crate) use self::selector::IoSourceState;
    }
    #[cfg(any(
        // For the public `pipe` module, must match `cfg_os_ext` macro.
        feature = "os-ext",
        // For the `Waker` type based on a pipe.
        mio_unsupported_force_waker_pipe,
        target_os = "aix",
        target_os = "dragonfly",
        target_os = "illumos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "redox",
        target_os = "solaris",
        target_os = "vita",
    ))]
    pub(crate) mod pipe;
}
cfg_not_os_poll! {
    cfg_net! {
        mod uds;
        pub use self::uds::SocketAddr;
    }
    cfg_any_os_ext! {
        mod sourcefd;
        #[cfg(feature = "os-ext")]
        pub use self::sourcefd::SourceFd;
    }
}
cfg_not_os_poll! {
cfg_net! {
mod uds;
pub use self::uds::SocketAddr;
}
cfg_any_os_ext! {
mod sourcefd;
#[cfg(feature = "os-ext")]
pub use self::sourcefd::SourceFd;
}
}

View file

@ -0,0 +1,195 @@
use std::io;
use std::mem::size_of;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
/// Create a new non-blocking IP socket whose address family matches `addr`.
pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: libc::c_int) -> io::Result<libc::c_int> {
    // Map the address to its socket domain, then defer to `new_socket` for
    // the actual non-blocking/cloexec socket creation.
    let domain = if addr.is_ipv4() {
        libc::AF_INET
    } else {
        libc::AF_INET6
    };
    new_socket(domain, socket_type)
}
/// Create a new non-blocking socket.
///
/// Returns the raw fd of a socket that is non-blocking and close-on-exec
/// (where the platform supports it), or the `socket(2)`/`setsockopt(2)`/
/// `fcntl(2)` error.
pub(crate) fn new_socket(domain: libc::c_int, socket_type: libc::c_int) -> io::Result<libc::c_int> {
    // Platforms with `SOCK_NONBLOCK`/`SOCK_CLOEXEC` set both flags
    // atomically at creation time.
    #[cfg(any(
        target_os = "android",
        target_os = "dragonfly",
        target_os = "freebsd",
        target_os = "illumos",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
    ))]
    let socket_type = socket_type | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
    let socket = syscall!(socket(domain, socket_type, 0))?;
    // Mimick `libstd` and set `SO_NOSIGPIPE` on apple systems.
    #[cfg(any(
        target_os = "ios",
        target_os = "macos",
        target_os = "tvos",
        target_os = "watchos",
    ))]
    if let Err(err) = syscall!(setsockopt(
        socket,
        libc::SOL_SOCKET,
        libc::SO_NOSIGPIPE,
        &1 as *const libc::c_int as *const libc::c_void,
        size_of::<libc::c_int>() as libc::socklen_t
    )) {
        // Don't leak the fd on failure.
        let _ = syscall!(close(socket));
        return Err(err);
    }
    // Darwin (and others) doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC.
    #[cfg(any(
        target_os = "ios",
        target_os = "macos",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "espidf",
        target_os = "vita",
    ))]
    {
        if let Err(err) = syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK)) {
            let _ = syscall!(close(socket));
            return Err(err);
        }
        // espidf/vita have no process model, so `FD_CLOEXEC` is skipped.
        #[cfg(not(any(target_os = "espidf", target_os = "vita")))]
        if let Err(err) = syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC)) {
            let _ = syscall!(close(socket));
            return Err(err);
        }
    }
    Ok(socket)
}
/// A type with the same memory layout as `libc::sockaddr`. Used in converting Rust level
/// SocketAddr* types into their system representation. The benefit of this specific
/// type over using `libc::sockaddr_storage` is that this type is exactly as large as it
/// needs to be and not a lot larger. And it can be initialized cleaner from Rust.
#[repr(C)]
pub(crate) union SocketAddrCRepr {
    v4: libc::sockaddr_in,
    v6: libc::sockaddr_in6,
}
impl SocketAddrCRepr {
    /// View the union as a `sockaddr` pointer suitable for passing to
    /// `bind(2)`/`connect(2)` together with the matching `socklen_t`.
    pub(crate) fn as_ptr(&self) -> *const libc::sockaddr {
        self as *const _ as *const libc::sockaddr
    }
}
/// Converts a Rust `SocketAddr` into the system representation.
///
/// Returns the C-compatible address plus the length to pass alongside it
/// (`sizeof(sockaddr_in)` or `sizeof(sockaddr_in6)`).
pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, libc::socklen_t) {
    match addr {
        SocketAddr::V4(ref addr) => {
            // `s_addr` is stored as BE on all machine and the array is in BE order.
            // So the native endian conversion method is used so that it's never swapped.
            let sin_addr = libc::in_addr {
                s_addr: u32::from_ne_bytes(addr.ip().octets()),
            };
            let sockaddr_in = libc::sockaddr_in {
                sin_family: libc::AF_INET as libc::sa_family_t,
                sin_port: addr.port().to_be(),
                sin_addr,
                // `sin_zero` padding differs in size on vita.
                #[cfg(not(target_os = "vita"))]
                sin_zero: [0; 8],
                #[cfg(target_os = "vita")]
                sin_zero: [0; 6],
                // BSD-derived platforms carry an extra length field.
                #[cfg(any(
                    target_os = "aix",
                    target_os = "dragonfly",
                    target_os = "freebsd",
                    target_os = "ios",
                    target_os = "macos",
                    target_os = "netbsd",
                    target_os = "openbsd",
                    target_os = "tvos",
                    target_os = "watchos",
                    target_os = "espidf",
                    target_os = "vita",
                ))]
                sin_len: 0,
                #[cfg(target_os = "vita")]
                sin_vport: addr.port().to_be(),
            };
            let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
            let socklen = size_of::<libc::sockaddr_in>() as libc::socklen_t;
            (sockaddr, socklen)
        }
        SocketAddr::V6(ref addr) => {
            let sockaddr_in6 = libc::sockaddr_in6 {
                sin6_family: libc::AF_INET6 as libc::sa_family_t,
                sin6_port: addr.port().to_be(),
                sin6_addr: libc::in6_addr {
                    s6_addr: addr.ip().octets(),
                },
                sin6_flowinfo: addr.flowinfo(),
                sin6_scope_id: addr.scope_id(),
                #[cfg(any(
                    target_os = "aix",
                    target_os = "dragonfly",
                    target_os = "freebsd",
                    target_os = "ios",
                    target_os = "macos",
                    target_os = "netbsd",
                    target_os = "openbsd",
                    target_os = "tvos",
                    target_os = "watchos",
                    target_os = "espidf",
                    target_os = "vita",
                ))]
                sin6_len: 0,
                #[cfg(target_os = "vita")]
                sin6_vport: addr.port().to_be(),
                #[cfg(any(target_os = "illumos", target_os = "solaris"))]
                __sin6_src_id: 0,
            };
            let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
            let socklen = size_of::<libc::sockaddr_in6>() as libc::socklen_t;
            (sockaddr, socklen)
        }
    }
}
/// Converts a `libc::sockaddr` compatible struct into a native Rust `SocketAddr`.
///
/// Returns `InvalidInput` for any family other than `AF_INET`/`AF_INET6`.
///
/// # Safety
///
/// `storage` must have the `ss_family` field correctly initialized.
/// `storage` must be initialised to a `sockaddr_in` or `sockaddr_in6`.
pub(crate) unsafe fn to_socket_addr(
    storage: *const libc::sockaddr_storage,
) -> io::Result<SocketAddr> {
    match (*storage).ss_family as libc::c_int {
        libc::AF_INET => {
            // Safety: if the ss_family field is AF_INET then storage must be a sockaddr_in.
            let addr: &libc::sockaddr_in = &*(storage as *const libc::sockaddr_in);
            // `s_addr`/`sin_port` are in network byte order (see `socket_addr`).
            let ip = Ipv4Addr::from(addr.sin_addr.s_addr.to_ne_bytes());
            let port = u16::from_be(addr.sin_port);
            Ok(SocketAddr::V4(SocketAddrV4::new(ip, port)))
        }
        libc::AF_INET6 => {
            // Safety: if the ss_family field is AF_INET6 then storage must be a sockaddr_in6.
            let addr: &libc::sockaddr_in6 = &*(storage as *const libc::sockaddr_in6);
            let ip = Ipv6Addr::from(addr.sin6_addr.s6_addr);
            let port = u16::from_be(addr.sin6_port);
            Ok(SocketAddr::V6(SocketAddrV6::new(
                ip,
                port,
                addr.sin6_flowinfo,
                addr.sin6_scope_id,
            )))
        }
        _ => Err(io::ErrorKind::InvalidInput.into()),
    }
}

View file

@ -0,0 +1,594 @@
//! Unix pipe.
//!
//! See the [`new`] function for documentation.
use std::io;
use std::os::unix::io::RawFd;
// Create a raw non-blocking, close-on-exec pipe; `fds[0]` is the read end,
// `fds[1]` the write end. Exactly one of the cfg'd branches below applies
// per target; unknown targets fail to compile.
pub(crate) fn new_raw() -> io::Result<[RawFd; 2]> {
    let mut fds: [RawFd; 2] = [-1, -1];
    // Platforms with `pipe2(2)` set both flags atomically at creation.
    #[cfg(any(
        target_os = "android",
        target_os = "dragonfly",
        target_os = "freebsd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "illumos",
        target_os = "redox",
        target_os = "solaris",
        target_os = "vita",
    ))]
    unsafe {
        if libc::pipe2(fds.as_mut_ptr(), libc::O_CLOEXEC | libc::O_NONBLOCK) != 0 {
            return Err(io::Error::last_os_error());
        }
    }
    #[cfg(any(
        target_os = "aix",
        target_os = "ios",
        target_os = "macos",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "espidf",
    ))]
    unsafe {
        // For platforms that don't have `pipe2(2)` we need to manually set the
        // correct flags on the file descriptor.
        if libc::pipe(fds.as_mut_ptr()) != 0 {
            return Err(io::Error::last_os_error());
        }
        for fd in &fds {
            if libc::fcntl(*fd, libc::F_SETFL, libc::O_NONBLOCK) != 0
                || libc::fcntl(*fd, libc::F_SETFD, libc::FD_CLOEXEC) != 0
            {
                let err = io::Error::last_os_error();
                // Don't leak file descriptors. Can't handle closing error though.
                let _ = libc::close(fds[0]);
                let _ = libc::close(fds[1]);
                return Err(err);
            }
        }
    }
    #[cfg(not(any(
        target_os = "aix",
        target_os = "android",
        target_os = "dragonfly",
        target_os = "freebsd",
        target_os = "illumos",
        target_os = "ios",
        target_os = "linux",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "redox",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "espidf",
        target_os = "solaris",
        target_os = "vita",
    )))]
    compile_error!("unsupported target for `mio::unix::pipe`");
    Ok(fds)
}
cfg_os_ext! {
use std::fs::File;
use std::io::{IoSlice, IoSliceMut, Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use std::process::{ChildStderr, ChildStdin, ChildStdout};
use crate::io_source::IoSource;
use crate::{event, Interest, Registry, Token};
/// Create a new non-blocking Unix pipe.
///
/// This is a wrapper around Unix's [`pipe(2)`] system call and can be used as
/// inter-process or thread communication channel.
///
/// This channel may be created before forking the process and then one end used
/// in each process, e.g. the parent process has the sending end to send command
/// to the child process.
///
/// [`pipe(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/pipe.html
///
/// # Events
///
/// The [`Sender`] can be registered with [`WRITABLE`] interest to receive
/// [writable events], the [`Receiver`] with [`READABLE`] interest. Once data is
/// written to the `Sender` the `Receiver` will receive an [readable event].
///
/// In addition to those events, events will also be generated if the other side
/// is dropped. To check if the `Sender` is dropped you'll need to check
/// [`is_read_closed`] on events for the `Receiver`, if it returns true the
/// `Sender` is dropped. On the `Sender` end check [`is_write_closed`], if it
/// returns true the `Receiver` was dropped. Also see the second example below.
///
/// [`WRITABLE`]: Interest::WRITABLE
/// [writable events]: event::Event::is_writable
/// [`READABLE`]: Interest::READABLE
/// [readable event]: event::Event::is_readable
/// [`is_read_closed`]: event::Event::is_read_closed
/// [`is_write_closed`]: event::Event::is_write_closed
///
/// # Deregistering
///
/// Both `Sender` and `Receiver` will deregister themselves when dropped,
/// **iff** the file descriptors are not duplicated (via [`dup(2)`]).
///
/// [`dup(2)`]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/dup.html
///
/// # Examples
///
/// Simple example that writes data into the sending end and read it from the
/// receiving end.
///
/// ```
/// use std::io::{self, Read, Write};
///
/// use mio::{Poll, Events, Interest, Token};
/// use mio::unix::pipe;
///
/// // Unique tokens for the two ends of the channel.
/// const PIPE_RECV: Token = Token(0);
/// const PIPE_SEND: Token = Token(1);
///
/// # fn main() -> io::Result<()> {
/// // Create our `Poll` instance and the `Events` container.
/// let mut poll = Poll::new()?;
/// let mut events = Events::with_capacity(8);
///
/// // Create a new pipe.
/// let (mut sender, mut receiver) = pipe::new()?;
///
/// // Register both ends of the channel.
/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?;
/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?;
///
/// const MSG: &[u8; 11] = b"Hello world";
///
/// loop {
/// poll.poll(&mut events, None)?;
///
/// for event in events.iter() {
/// match event.token() {
/// PIPE_SEND => sender.write(MSG)
/// .and_then(|n| if n != MSG.len() {
/// // We'll consider a short write an error in this
/// // example. NOTE: we can't use `write_all` with
/// // non-blocking I/O.
/// Err(io::ErrorKind::WriteZero.into())
/// } else {
/// Ok(())
/// })?,
/// PIPE_RECV => {
/// let mut buf = [0; 11];
/// let n = receiver.read(&mut buf)?;
/// println!("received: {:?}", &buf[0..n]);
/// assert_eq!(n, MSG.len());
/// assert_eq!(&buf, &*MSG);
/// return Ok(());
/// },
/// _ => unreachable!(),
/// }
/// }
/// }
/// # }
/// ```
///
/// Example that receives an event once the `Sender` is dropped.
///
/// ```
/// # use std::io;
/// #
/// # use mio::{Poll, Events, Interest, Token};
/// # use mio::unix::pipe;
/// #
/// # const PIPE_RECV: Token = Token(0);
/// # const PIPE_SEND: Token = Token(1);
/// #
/// # fn main() -> io::Result<()> {
/// // Same setup as in the example above.
/// let mut poll = Poll::new()?;
/// let mut events = Events::with_capacity(8);
///
/// let (mut sender, mut receiver) = pipe::new()?;
///
/// poll.registry().register(&mut receiver, PIPE_RECV, Interest::READABLE)?;
/// poll.registry().register(&mut sender, PIPE_SEND, Interest::WRITABLE)?;
///
/// // Drop the sender.
/// drop(sender);
///
/// poll.poll(&mut events, None)?;
///
/// for event in events.iter() {
/// match event.token() {
/// PIPE_RECV if event.is_read_closed() => {
/// // Detected that the sender was dropped.
/// println!("Sender dropped!");
/// return Ok(());
/// },
/// _ => unreachable!(),
/// }
/// }
/// # unreachable!();
/// # }
/// ```
pub fn new() -> io::Result<(Sender, Receiver)> {
let fds = new_raw()?;
// SAFETY: `new_raw` initialised the `fds` above.
let r = unsafe { Receiver::from_raw_fd(fds[0]) };
let w = unsafe { Sender::from_raw_fd(fds[1]) };
Ok((w, r))
}
    /// Sending end of an Unix pipe.
    ///
    /// See [`new`] for documentation, including examples.
    #[derive(Debug)]
    pub struct Sender {
        // The pipe's write fd, wrapped so registration state is tracked.
        inner: IoSource<File>,
    }
impl Sender {
        /// Set the `Sender` into or out of non-blocking mode.
        // Needed because `From<ChildStdin>` does not change the pipe's mode.
        pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
            set_nonblocking(self.inner.as_raw_fd(), nonblocking)
        }
/// Execute an I/O operation ensuring that the socket receives more events
/// if it hits a [`WouldBlock`] error.
///
/// # Notes
///
/// This method is required to be called for **all** I/O operations to
/// ensure the user will receive events once the socket is ready again after
/// returning a [`WouldBlock`] error.
///
/// [`WouldBlock`]: io::ErrorKind::WouldBlock
///
/// # Examples
///
/// ```
/// # use std::error::Error;
/// #
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use std::io;
/// use std::os::unix::io::AsRawFd;
/// use mio::unix::pipe;
///
/// let (sender, receiver) = pipe::new()?;
///
/// // Wait until the sender is writable...
///
/// // Write to the sender using a direct libc call, of course the
/// // `io::Write` implementation would be easier to use.
/// let buf = b"hello";
/// let n = sender.try_io(|| {
/// let buf_ptr = &buf as *const _ as *const _;
/// let res = unsafe { libc::write(sender.as_raw_fd(), buf_ptr, buf.len()) };
/// if res != -1 {
/// Ok(res as usize)
/// } else {
/// // If EAGAIN or EWOULDBLOCK is set by libc::write, the closure
/// // should return `WouldBlock` error.
/// Err(io::Error::last_os_error())
/// }
/// })?;
/// eprintln!("write {} bytes", n);
///
/// // Wait until the receiver is readable...
///
/// // Read from the receiver using a direct libc call, of course the
/// // `io::Read` implementation would be easier to use.
/// let mut buf = [0; 512];
/// let n = receiver.try_io(|| {
/// let buf_ptr = &mut buf as *mut _ as *mut _;
/// let res = unsafe { libc::read(receiver.as_raw_fd(), buf_ptr, buf.len()) };
/// if res != -1 {
/// Ok(res as usize)
/// } else {
/// // If EAGAIN or EWOULDBLOCK is set by libc::read, the closure
/// // should return `WouldBlock` error.
/// Err(io::Error::last_os_error())
/// }
/// })?;
/// eprintln!("read {} bytes", n);
/// # Ok(())
/// # }
/// ```
        // Route the caller's closure through `do_io` so the source is
        // re-registered for events after a `WouldBlock` result.
        pub fn try_io<F, T>(&self, f: F) -> io::Result<T>
        where
            F: FnOnce() -> io::Result<T>,
        {
            self.inner.do_io(|_| f())
        }
}
    // Event-source plumbing: delegate (de)registration to the wrapped
    // `IoSource`, which talks to the registry's selector.
    impl event::Source for Sender {
        fn register(
            &mut self,
            registry: &Registry,
            token: Token,
            interests: Interest,
        ) -> io::Result<()> {
            self.inner.register(registry, token, interests)
        }
        fn reregister(
            &mut self,
            registry: &Registry,
            token: Token,
            interests: Interest,
        ) -> io::Result<()> {
            self.inner.reregister(registry, token, interests)
        }
        fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
            self.inner.deregister(registry)
        }
    }
impl Write for Sender {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.do_io(|mut sender| sender.write(buf))
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.inner.do_io(|mut sender| sender.write_vectored(bufs))
}
fn flush(&mut self) -> io::Result<()> {
self.inner.do_io(|mut sender| sender.flush())
}
}
impl Write for &Sender {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.do_io(|mut sender| sender.write(buf))
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
self.inner.do_io(|mut sender| sender.write_vectored(bufs))
}
fn flush(&mut self) -> io::Result<()> {
self.inner.do_io(|mut sender| sender.flush())
}
}
    /// # Notes
    ///
    /// The underlying pipe is **not** set to non-blocking.
    impl From<ChildStdin> for Sender {
        fn from(stdin: ChildStdin) -> Sender {
            // Safety: `ChildStdin` is guaranteed to be a valid file descriptor.
            unsafe { Sender::from_raw_fd(stdin.into_raw_fd()) }
        }
    }
    impl FromRawFd for Sender {
        // Caller must hand over an owned, valid fd (see `FromRawFd` contract).
        unsafe fn from_raw_fd(fd: RawFd) -> Sender {
            Sender {
                inner: IoSource::new(File::from_raw_fd(fd)),
            }
        }
    }
    impl AsRawFd for Sender {
        fn as_raw_fd(&self) -> RawFd {
            self.inner.as_raw_fd()
        }
    }
    impl IntoRawFd for Sender {
        // Unwraps the `IoSource` and releases ownership of the fd to the caller.
        fn into_raw_fd(self) -> RawFd {
            self.inner.into_inner().into_raw_fd()
        }
    }
    /// Receiving end of an Unix pipe.
    ///
    /// See [`new`] for documentation, including examples.
    #[derive(Debug)]
    pub struct Receiver {
        // The pipe's read fd, wrapped so registration state is tracked.
        inner: IoSource<File>,
    }
impl Receiver {
        /// Set the `Receiver` into or out of non-blocking mode.
        // Needed because `From<ChildStdout>`/`From<ChildStderr>` do not
        // change the pipe's mode.
        pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
            set_nonblocking(self.inner.as_raw_fd(), nonblocking)
        }
/// Execute an I/O operation ensuring that the socket receives more events
/// if it hits a [`WouldBlock`] error.
///
/// # Notes
///
/// This method is required to be called for **all** I/O operations to
/// ensure the user will receive events once the socket is ready again after
/// returning a [`WouldBlock`] error.
///
/// [`WouldBlock`]: io::ErrorKind::WouldBlock
///
/// # Examples
///
/// ```
/// # use std::error::Error;
/// #
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use std::io;
/// use std::os::unix::io::AsRawFd;
/// use mio::unix::pipe;
///
/// let (sender, receiver) = pipe::new()?;
///
/// // Wait until the sender is writable...
///
/// // Write to the sender using a direct libc call, of course the
/// // `io::Write` implementation would be easier to use.
/// let buf = b"hello";
/// let n = sender.try_io(|| {
/// let buf_ptr = &buf as *const _ as *const _;
/// let res = unsafe { libc::write(sender.as_raw_fd(), buf_ptr, buf.len()) };
/// if res != -1 {
/// Ok(res as usize)
/// } else {
/// // If EAGAIN or EWOULDBLOCK is set by libc::write, the closure
/// // should return `WouldBlock` error.
/// Err(io::Error::last_os_error())
/// }
/// })?;
/// eprintln!("write {} bytes", n);
///
/// // Wait until the receiver is readable...
///
/// // Read from the receiver using a direct libc call, of course the
/// // `io::Read` implementation would be easier to use.
/// let mut buf = [0; 512];
/// let n = receiver.try_io(|| {
/// let buf_ptr = &mut buf as *mut _ as *mut _;
/// let res = unsafe { libc::read(receiver.as_raw_fd(), buf_ptr, buf.len()) };
/// if res != -1 {
/// Ok(res as usize)
/// } else {
/// // If EAGAIN or EWOULDBLOCK is set by libc::read, the closure
/// // should return `WouldBlock` error.
/// Err(io::Error::last_os_error())
/// }
/// })?;
/// eprintln!("read {} bytes", n);
/// # Ok(())
/// # }
/// ```
        // Route the caller's closure through `do_io` so the source is
        // re-registered for events after a `WouldBlock` result.
        pub fn try_io<F, T>(&self, f: F) -> io::Result<T>
        where
            F: FnOnce() -> io::Result<T>,
        {
            self.inner.do_io(|_| f())
        }
}
    // Event-source plumbing: delegate (de)registration to the wrapped
    // `IoSource`, mirroring the `Sender` impl above.
    impl event::Source for Receiver {
        fn register(
            &mut self,
            registry: &Registry,
            token: Token,
            interests: Interest,
        ) -> io::Result<()> {
            self.inner.register(registry, token, interests)
        }
        fn reregister(
            &mut self,
            registry: &Registry,
            token: Token,
            interests: Interest,
        ) -> io::Result<()> {
            self.inner.reregister(registry, token, interests)
        }
        fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
            self.inner.deregister(registry)
        }
    }
impl Read for Receiver {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.do_io(|mut sender| sender.read(buf))
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.do_io(|mut sender| sender.read_vectored(bufs))
}
}
impl Read for &Receiver {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.do_io(|mut sender| sender.read(buf))
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.inner.do_io(|mut sender| sender.read_vectored(bufs))
}
}
    /// # Notes
    ///
    /// The underlying pipe is **not** set to non-blocking.
    impl From<ChildStdout> for Receiver {
        fn from(stdout: ChildStdout) -> Receiver {
            // Safety: `ChildStdout` is guaranteed to be a valid file descriptor.
            unsafe { Receiver::from_raw_fd(stdout.into_raw_fd()) }
        }
    }
    /// # Notes
    ///
    /// The underlying pipe is **not** set to non-blocking.
    impl From<ChildStderr> for Receiver {
        fn from(stderr: ChildStderr) -> Receiver {
            // Safety: `ChildStderr` is guaranteed to be a valid file descriptor.
            unsafe { Receiver::from_raw_fd(stderr.into_raw_fd()) }
        }
    }
    impl FromRawFd for Receiver {
        // Caller must hand over an owned, valid fd (see `FromRawFd` contract).
        unsafe fn from_raw_fd(fd: RawFd) -> Receiver {
            Receiver {
                inner: IoSource::new(File::from_raw_fd(fd)),
            }
        }
    }
    impl AsRawFd for Receiver {
        fn as_raw_fd(&self) -> RawFd {
            self.inner.as_raw_fd()
        }
    }
    impl IntoRawFd for Receiver {
        // Unwraps the `IoSource` and releases ownership of the fd to the caller.
        fn into_raw_fd(self) -> RawFd {
            self.inner.into_inner().into_raw_fd()
        }
    }
    // Fast path: a single `ioctl(FIONBIO)` toggles non-blocking mode on
    // platforms that support it.
    #[cfg(not(any(target_os = "illumos", target_os = "solaris", target_os = "vita")))]
    fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> {
        let value = nonblocking as libc::c_int;
        if unsafe { libc::ioctl(fd, libc::FIONBIO, &value) } == -1 {
            Err(io::Error::last_os_error())
        } else {
            Ok(())
        }
    }
#[cfg(any(target_os = "illumos", target_os = "solaris", target_os = "vita"))]
fn set_nonblocking(fd: RawFd, nonblocking: bool) -> io::Result<()> {
let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) };
if flags < 0 {
return Err(io::Error::last_os_error());
}
let nflags = if nonblocking {
flags | libc::O_NONBLOCK
} else {
flags & !libc::O_NONBLOCK
};
if flags != nflags {
if unsafe { libc::fcntl(fd, libc::F_SETFL, nflags) } < 0 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
} // `cfg_os_ext!`.

View file

@ -0,0 +1,278 @@
use crate::{Interest, Token};
use libc::{EPOLLET, EPOLLIN, EPOLLOUT, EPOLLPRI, EPOLLRDHUP};
use std::os::unix::io::{AsRawFd, RawFd};
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use std::{cmp, i32, io, ptr};
/// Unique id for use as `SelectorId`.
// Starts at 1; monotonically increasing, handed out in `Selector::new`.
#[cfg(debug_assertions)]
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
#[derive(Debug)]
pub struct Selector {
    // Debug-only identity used to verify sources are registered with the
    // selector they were first registered with.
    #[cfg(debug_assertions)]
    id: usize,
    // The epoll instance's file descriptor.
    ep: RawFd,
}
impl Selector {
    /// Create a new epoll-backed selector, preferring `epoll_create1` with
    /// `EPOLL_CLOEXEC` and degrading gracefully on old kernels/Android.
    pub fn new() -> io::Result<Selector> {
        #[cfg(not(target_os = "android"))]
        let res = syscall!(epoll_create1(libc::EPOLL_CLOEXEC));
        // On Android < API level 16 `epoll_create1` is not defined, so use a
        // raw system call.
        // According to libuv, `EPOLL_CLOEXEC` is not defined on Android API <
        // 21. But `EPOLL_CLOEXEC` is an alias for `O_CLOEXEC` on that platform,
        // so we use it instead.
        #[cfg(target_os = "android")]
        let res = syscall!(syscall(libc::SYS_epoll_create1, libc::O_CLOEXEC));
        let ep = match res {
            Ok(ep) => ep as RawFd,
            Err(err) => {
                // When `epoll_create1` is not available fall back to use
                // `epoll_create` followed by `fcntl`.
                if let Some(libc::ENOSYS) = err.raw_os_error() {
                    match syscall!(epoll_create(1024)) {
                        Ok(ep) => match syscall!(fcntl(ep, libc::F_SETFD, libc::FD_CLOEXEC)) {
                            Ok(ep) => ep as RawFd,
                            Err(err) => {
                                // `fcntl` failed, cleanup `ep`.
                                let _ = unsafe { libc::close(ep) };
                                return Err(err);
                            }
                        },
                        Err(err) => return Err(err),
                    }
                } else {
                    return Err(err);
                }
            }
        };
        Ok(Selector {
            #[cfg(debug_assertions)]
            id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
            ep,
        })
    }
pub fn try_clone(&self) -> io::Result<Selector> {
syscall!(fcntl(self.ep, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|ep| Selector {
// It's the same selector, so we use the same id.
#[cfg(debug_assertions)]
id: self.id,
ep,
})
}
pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
// A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ
// (approx. 30 minutes with CONFIG_HZ=1200) effectively infinite on 32 bits
// architectures. The magic number is the same constant used by libuv.
#[cfg(target_pointer_width = "32")]
const MAX_SAFE_TIMEOUT: u128 = 1789569;
#[cfg(not(target_pointer_width = "32"))]
const MAX_SAFE_TIMEOUT: u128 = libc::c_int::max_value() as u128;
let timeout = timeout
.map(|to| {
// `Duration::as_millis` truncates, so round up. This avoids
// turning sub-millisecond timeouts into a zero timeout, unless
// the caller explicitly requests that by specifying a zero
// timeout.
let to_ms = to
.checked_add(Duration::from_nanos(999_999))
.unwrap_or(to)
.as_millis();
cmp::min(MAX_SAFE_TIMEOUT, to_ms) as libc::c_int
})
.unwrap_or(-1);
events.clear();
syscall!(epoll_wait(
self.ep,
events.as_mut_ptr(),
events.capacity() as i32,
timeout,
))
.map(|n_events| {
// This is safe because `epoll_wait` ensures that `n_events` are
// assigned.
unsafe { events.set_len(n_events as usize) };
})
}
pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
let mut event = libc::epoll_event {
events: interests_to_epoll(interests),
u64: usize::from(token) as u64,
#[cfg(target_os = "redox")]
_pad: 0,
};
syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_ADD, fd, &mut event)).map(|_| ())
}
pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
let mut event = libc::epoll_event {
events: interests_to_epoll(interests),
u64: usize::from(token) as u64,
#[cfg(target_os = "redox")]
_pad: 0,
};
syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_MOD, fd, &mut event)).map(|_| ())
}
pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
syscall!(epoll_ctl(self.ep, libc::EPOLL_CTL_DEL, fd, ptr::null_mut())).map(|_| ())
}
}
cfg_io_source! {
    impl Selector {
        /// Debug-only accessor for this selector's unique id, used by
        /// `SelectorId` association checks.
        #[cfg(debug_assertions)]
        pub fn id(&self) -> usize {
            self.id
        }
    }
}
impl AsRawFd for Selector {
fn as_raw_fd(&self) -> RawFd {
self.ep
}
}
impl Drop for Selector {
fn drop(&mut self) {
if let Err(err) = syscall!(close(self.ep)) {
error!("error closing epoll: {}", err);
}
}
}
/// Translates mio `Interest`s into an epoll event mask; always edge-triggered.
fn interests_to_epoll(interests: Interest) -> u32 {
    let mut kind = EPOLLET;
    if interests.is_readable() {
        // Readable interest also watches for peer hang-up.
        kind |= EPOLLIN | EPOLLRDHUP;
    }
    if interests.is_writable() {
        kind |= EPOLLOUT;
    }
    if interests.is_priority() {
        kind |= EPOLLPRI;
    }
    kind as u32
}
// Events are plain `epoll_event` structures, collected in a `Vec`.
pub type Event = libc::epoll_event;
pub type Events = Vec<Event>;
/// Readiness decoding for raw `epoll_event`s, used via `crate::sys::event`.
pub mod event {
    use std::fmt;
    use crate::sys::Event;
    use crate::Token;
    /// Returns the token stored in the event's `u64` field at registration.
    pub fn token(event: &Event) -> Token {
        Token(event.u64 as usize)
    }
    pub fn is_readable(event: &Event) -> bool {
        // Priority (`EPOLLPRI`) data is reported as readable as well.
        (event.events as libc::c_int & libc::EPOLLIN) != 0
            || (event.events as libc::c_int & libc::EPOLLPRI) != 0
    }
    pub fn is_writable(event: &Event) -> bool {
        (event.events as libc::c_int & libc::EPOLLOUT) != 0
    }
    pub fn is_error(event: &Event) -> bool {
        (event.events as libc::c_int & libc::EPOLLERR) != 0
    }
    pub fn is_read_closed(event: &Event) -> bool {
        // Both halves of the socket have closed
        event.events as libc::c_int & libc::EPOLLHUP != 0
            // Socket has received FIN or called shutdown(SHUT_RD)
            || (event.events as libc::c_int & libc::EPOLLIN != 0
                && event.events as libc::c_int & libc::EPOLLRDHUP != 0)
    }
    pub fn is_write_closed(event: &Event) -> bool {
        // Both halves of the socket have closed
        event.events as libc::c_int & libc::EPOLLHUP != 0
            // Unix pipe write end has closed
            || (event.events as libc::c_int & libc::EPOLLOUT != 0
                && event.events as libc::c_int & libc::EPOLLERR != 0)
            // The other side (read end) of a Unix pipe has closed.
            || event.events as libc::c_int == libc::EPOLLERR
    }
    pub fn is_priority(event: &Event) -> bool {
        (event.events as libc::c_int & libc::EPOLLPRI) != 0
    }
    pub fn is_aio(_: &Event) -> bool {
        // Not supported in the kernel, only in libc.
        false
    }
    pub fn is_lio(_: &Event) -> bool {
        // Not supported.
        false
    }
    /// Renders every set epoll flag by name for `fmt::Debug` output.
    pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
        #[allow(clippy::trivially_copy_pass_by_ref)]
        fn check_events(got: &u32, want: &libc::c_int) -> bool {
            (*got as libc::c_int & want) != 0
        }
        debug_detail!(
            EventsDetails(u32),
            check_events,
            libc::EPOLLIN,
            libc::EPOLLPRI,
            libc::EPOLLOUT,
            libc::EPOLLRDNORM,
            libc::EPOLLRDBAND,
            libc::EPOLLWRNORM,
            libc::EPOLLWRBAND,
            libc::EPOLLMSG,
            libc::EPOLLERR,
            libc::EPOLLHUP,
            libc::EPOLLET,
            libc::EPOLLRDHUP,
            libc::EPOLLONESHOT,
            #[cfg(target_os = "linux")]
            libc::EPOLLEXCLUSIVE,
            #[cfg(any(target_os = "android", target_os = "linux"))]
            libc::EPOLLWAKEUP,
            libc::EPOLL_CLOEXEC,
        );
        // Can't reference fields in packed structures.
        let e_u64 = event.u64;
        f.debug_struct("epoll_event")
            .field("events", &EventsDetails(event.events))
            .field("u64", &e_u64)
            .finish()
    }
}
#[cfg(target_os = "android")]
#[test]
fn assert_close_on_exec_flag() {
    // This assertion needs to be true for `Selector::new`, which passes
    // `O_CLOEXEC` to the raw `epoll_create1` syscall on Android.
    assert_eq!(libc::O_CLOEXEC, libc::EPOLL_CLOEXEC);
}

View file

@ -0,0 +1,849 @@
use crate::{Interest, Token};
use std::mem::{self, MaybeUninit};
use std::ops::{Deref, DerefMut};
use std::os::unix::io::{AsRawFd, RawFd};
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use std::{cmp, io, ptr, slice};
/// Unique id for use as `SelectorId`.
#[cfg(debug_assertions)]
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
// The following aliases paper over per-OS differences in the `kevent` ABI.
// Type of the `nchanges` and `nevents` parameters in the `kevent` function.
#[cfg(not(target_os = "netbsd"))]
type Count = libc::c_int;
#[cfg(target_os = "netbsd")]
type Count = libc::size_t;
// Type of the `filter` field in the `kevent` structure.
#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
type Filter = libc::c_short;
#[cfg(any(
    target_os = "ios",
    target_os = "macos",
    target_os = "tvos",
    target_os = "watchos"
))]
type Filter = i16;
#[cfg(target_os = "netbsd")]
type Filter = u32;
// Type of the `flags` field in the `kevent` structure.
#[cfg(any(target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd"))]
type Flags = libc::c_ushort;
#[cfg(any(
    target_os = "ios",
    target_os = "macos",
    target_os = "tvos",
    target_os = "watchos"
))]
type Flags = u16;
#[cfg(target_os = "netbsd")]
type Flags = u32;
// Type of the `udata` field in the `kevent` structure.
#[cfg(not(target_os = "netbsd"))]
type UData = *mut libc::c_void;
#[cfg(target_os = "netbsd")]
type UData = libc::intptr_t;
// Helper to construct a `libc::kevent` with the four fields we care about;
// all remaining fields (`data`, `fflags`, OS-specific extensions) are zeroed.
macro_rules! kevent {
    ($id: expr, $filter: expr, $flags: expr, $data: expr) => {
        libc::kevent {
            ident: $id as libc::uintptr_t,
            filter: $filter as Filter,
            flags: $flags,
            udata: $data as UData,
            ..unsafe { mem::zeroed() }
        }
    };
}
/// kqueue(2)-based selector.
#[derive(Debug)]
pub struct Selector {
    /// Debug-only identifier, used to detect sources registered with a
    /// different `Poll` instance.
    #[cfg(debug_assertions)]
    id: usize,
    /// The kqueue file descriptor.
    kq: RawFd,
}
impl Selector {
    /// Creates a new kqueue instance and marks it close-on-exec.
    pub fn new() -> io::Result<Selector> {
        let kq = syscall!(kqueue())?;
        let selector = Selector {
            #[cfg(debug_assertions)]
            id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
            kq,
        };
        // NOTE: `FD_CLOEXEC` is set after creation; kqueue has no atomic
        // cloexec creation flag here.
        syscall!(fcntl(kq, libc::F_SETFD, libc::FD_CLOEXEC))?;
        Ok(selector)
    }
    /// Duplicates the kqueue fd via `F_DUPFD_CLOEXEC`; both selectors refer
    /// to the same kernel queue.
    pub fn try_clone(&self) -> io::Result<Selector> {
        syscall!(fcntl(self.kq, libc::F_DUPFD_CLOEXEC, super::LOWEST_FD)).map(|kq| Selector {
            // It's the same selector, so we use the same id.
            #[cfg(debug_assertions)]
            id: self.id,
            kq,
        })
    }
    /// Waits for readiness events, filling `events`. A `None` timeout blocks
    /// indefinitely (null `timespec`).
    pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
        let timeout = timeout.map(|to| libc::timespec {
            tv_sec: cmp::min(to.as_secs(), libc::time_t::max_value() as u64) as libc::time_t,
            // `Duration::subsec_nanos` is guaranteed to be less than one
            // billion (the number of nanoseconds in a second), making the
            // cast to i32 safe. The cast itself is needed for platforms
            // where C's long is only 32 bits.
            tv_nsec: libc::c_long::from(to.subsec_nanos() as i32),
        });
        let timeout = timeout
            .as_ref()
            .map(|s| s as *const _)
            .unwrap_or(ptr::null_mut());
        events.clear();
        syscall!(kevent(
            self.kq,
            ptr::null(),
            0,
            events.as_mut_ptr(),
            events.capacity() as Count,
            timeout,
        ))
        .map(|n_events| {
            // This is safe because `kevent` ensures that `n_events` are
            // assigned.
            unsafe { events.set_len(n_events as usize) };
        })
    }
    /// Adds read and/or write filters for `fd`, tagged with `token`.
    pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
        let flags = libc::EV_CLEAR | libc::EV_RECEIPT | libc::EV_ADD;
        // At most we need two changes, but maybe we only need 1.
        let mut changes: [MaybeUninit<libc::kevent>; 2] =
            [MaybeUninit::uninit(), MaybeUninit::uninit()];
        let mut n_changes = 0;
        if interests.is_writable() {
            let kevent = kevent!(fd, libc::EVFILT_WRITE, flags, token.0);
            changes[n_changes] = MaybeUninit::new(kevent);
            n_changes += 1;
        }
        if interests.is_readable() {
            let kevent = kevent!(fd, libc::EVFILT_READ, flags, token.0);
            changes[n_changes] = MaybeUninit::new(kevent);
            n_changes += 1;
        }
        // Older versions of macOS (OS X 10.11 and 10.10 have been witnessed)
        // can return EPIPE when registering a pipe file descriptor where the
        // other end has already disappeared. For example code that creates a
        // pipe, closes a file descriptor, and then registers the other end will
        // see an EPIPE returned from `register`.
        //
        // It also turns out that kevent will still report events on the file
        // descriptor, telling us that it's readable/hup at least after we've
        // done this registration. As a result we just ignore `EPIPE` here
        // instead of propagating it.
        //
        // More info can be found at tokio-rs/mio#582.
        let changes = unsafe {
            // This is safe because we ensure that at least `n_changes` are in
            // the array.
            slice::from_raw_parts_mut(changes[0].as_mut_ptr(), n_changes)
        };
        kevent_register(self.kq, changes, &[libc::EPIPE as i64])
    }
    /// Updates the filters for an already registered `fd`, adding the filters
    /// matching `interests` and deleting the others.
    pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
        let flags = libc::EV_CLEAR | libc::EV_RECEIPT;
        let write_flags = if interests.is_writable() {
            flags | libc::EV_ADD
        } else {
            flags | libc::EV_DELETE
        };
        let read_flags = if interests.is_readable() {
            flags | libc::EV_ADD
        } else {
            flags | libc::EV_DELETE
        };
        let mut changes: [libc::kevent; 2] = [
            kevent!(fd, libc::EVFILT_WRITE, write_flags, token.0),
            kevent!(fd, libc::EVFILT_READ, read_flags, token.0),
        ];
        // Since there is no way to check with which interests the fd was
        // registered we modify both readable and write, adding it when required
        // and removing it otherwise, ignoring the ENOENT error when it comes
        // up. The ENOENT error informs us that a filter we're trying to remove
        // wasn't there in first place, but we don't really care since our goal
        // is accomplished.
        //
        // For the explanation of ignoring `EPIPE` see `register`.
        kevent_register(
            self.kq,
            &mut changes,
            &[libc::ENOENT as i64, libc::EPIPE as i64],
        )
    }
    /// Removes both read and write filters for `fd`.
    pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
        let flags = libc::EV_DELETE | libc::EV_RECEIPT;
        let mut changes: [libc::kevent; 2] = [
            kevent!(fd, libc::EVFILT_WRITE, flags, 0),
            kevent!(fd, libc::EVFILT_READ, flags, 0),
        ];
        // Since there is no way to check with which interests the fd was
        // registered we remove both filters (readable and writeable) and ignore
        // the ENOENT error when it comes up. The ENOENT error informs us that
        // the filter wasn't there in first place, but we don't really care
        // about that since our goal is to remove it.
        kevent_register(self.kq, &mut changes, &[libc::ENOENT as i64])
    }
    // Used by `Waker`.
    #[cfg(any(
        target_os = "freebsd",
        target_os = "ios",
        target_os = "macos",
        target_os = "tvos",
        target_os = "watchos"
    ))]
    /// Installs an `EVFILT_USER` filter so `wake` can trigger it later.
    pub fn setup_waker(&self, token: Token) -> io::Result<()> {
        // First attempt to accept user space notifications.
        let mut kevent = kevent!(
            0,
            libc::EVFILT_USER,
            libc::EV_ADD | libc::EV_CLEAR | libc::EV_RECEIPT,
            token.0
        );
        // With `EV_RECEIPT` the result is reported back in the same kevent:
        // `EV_ERROR` set and a non-zero `data` field means failure.
        syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| {
            if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 {
                Err(io::Error::from_raw_os_error(kevent.data as i32))
            } else {
                Ok(())
            }
        })
    }
    // Used by `Waker`.
    #[cfg(any(
        target_os = "freebsd",
        target_os = "ios",
        target_os = "macos",
        target_os = "tvos",
        target_os = "watchos"
    ))]
    /// Fires the `EVFILT_USER` filter installed by `setup_waker` via
    /// `NOTE_TRIGGER`.
    pub fn wake(&self, token: Token) -> io::Result<()> {
        let mut kevent = kevent!(
            0,
            libc::EVFILT_USER,
            libc::EV_ADD | libc::EV_RECEIPT,
            token.0
        );
        kevent.fflags = libc::NOTE_TRIGGER;
        syscall!(kevent(self.kq, &kevent, 1, &mut kevent, 1, ptr::null())).and_then(|_| {
            if (kevent.flags & libc::EV_ERROR) != 0 && kevent.data != 0 {
                Err(io::Error::from_raw_os_error(kevent.data as i32))
            } else {
                Ok(())
            }
        })
    }
}
/// Register `changes` with `kq`ueue.
///
/// Callers pass `EV_RECEIPT` in each change, so per-change results are
/// written back into `changes`; after the syscall the slice is scanned and
/// any error code not listed in `ignored_errors` is returned.
fn kevent_register(
    kq: RawFd,
    changes: &mut [libc::kevent],
    ignored_errors: &[i64],
) -> io::Result<()> {
    syscall!(kevent(
        kq,
        changes.as_ptr(),
        changes.len() as Count,
        changes.as_mut_ptr(),
        changes.len() as Count,
        ptr::null(),
    ))
    .map(|_| ())
    .or_else(|err| {
        // According to the manual page of FreeBSD: "When kevent() call fails
        // with EINTR error, all changes in the changelist have been applied",
        // so we can safely ignore it.
        if err.raw_os_error() == Some(libc::EINTR) {
            Ok(())
        } else {
            Err(err)
        }
    })
    .and_then(|()| check_errors(changes, ignored_errors))
}
/// Check all events for possible errors, returning the first error found.
fn check_errors(events: &[libc::kevent], ignored_errors: &[i64]) -> io::Result<()> {
    for event in events.iter() {
        // We can't use references to packed structures (in checking the ignored
        // errors), so we need copy the data out before use.
        let data: i64 = event.data as i64;
        // An error is signalled via the `EV_ERROR` flag; the error code itself
        // lives in `data`. A zero code or an explicitly ignored code is fine.
        let is_error = event.flags & libc::EV_ERROR != 0;
        if is_error && data != 0 && !ignored_errors.contains(&data) {
            return Err(io::Error::from_raw_os_error(data as i32));
        }
    }
    Ok(())
}
cfg_io_source! {
    // Debug-only accessor for this selector's unique id, used by
    // `SelectorId` association checks.
    #[cfg(debug_assertions)]
    impl Selector {
        pub fn id(&self) -> usize {
            self.id
        }
    }
}
impl AsRawFd for Selector {
fn as_raw_fd(&self) -> RawFd {
self.kq
}
}
impl Drop for Selector {
fn drop(&mut self) {
if let Err(err) = syscall!(close(self.kq)) {
error!("error closing kqueue: {}", err);
}
}
}
pub type Event = libc::kevent;
/// Growable buffer of `kevent`s filled by `Selector::select`.
pub struct Events(Vec<libc::kevent>);
impl Events {
    /// Creates an empty buffer able to hold `capacity` events per poll.
    pub fn with_capacity(capacity: usize) -> Events {
        let inner = Vec::with_capacity(capacity);
        Events(inner)
    }
}
impl Deref for Events {
type Target = Vec<libc::kevent>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Events {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
// SAFETY: `Events` cannot derive `Send` or `Sync` because of the
// `udata: *mut ::c_void` field in `libc::kevent`. However, `Events`'s public
// API treats the `udata` field as a `uintptr_t` which is `Send`. `Sync` is
// safe because with a `events: &Events` value, the only access to the `udata`
// field is through `fn token(event: &Event)` which cannot mutate the field.
unsafe impl Send for Events {}
unsafe impl Sync for Events {}
pub mod event {
use std::fmt;
use crate::sys::Event;
use crate::Token;
use super::{Filter, Flags};
pub fn token(event: &Event) -> Token {
Token(event.udata as usize)
}
    /// Read readiness; on Apple/FreeBSD the waker's `EVFILT_USER` events are
    /// reported as readable too.
    pub fn is_readable(event: &Event) -> bool {
        event.filter == libc::EVFILT_READ || {
            #[cfg(any(
                target_os = "freebsd",
                target_os = "ios",
                target_os = "macos",
                target_os = "tvos",
                target_os = "watchos"
            ))]
            // Used by the `Awakener`. On platforms that use `eventfd` or a unix
            // pipe it will emit a readable event so we'll fake that here as
            // well.
            {
                event.filter == libc::EVFILT_USER
            }
            #[cfg(not(any(
                target_os = "freebsd",
                target_os = "ios",
                target_os = "macos",
                target_os = "tvos",
                target_os = "watchos"
            )))]
            {
                false
            }
        }
    }
pub fn is_writable(event: &Event) -> bool {
event.filter == libc::EVFILT_WRITE
}
    /// Error readiness: either an explicit `EV_ERROR`, or EOF with a pending
    /// error code in `fflags`.
    pub fn is_error(event: &Event) -> bool {
        (event.flags & libc::EV_ERROR) != 0 ||
            // When the read end of the socket is closed, EV_EOF is set on
            // flags, and fflags contains the error if there is one.
            (event.flags & libc::EV_EOF) != 0 && event.fflags != 0
    }
pub fn is_read_closed(event: &Event) -> bool {
event.filter == libc::EVFILT_READ && event.flags & libc::EV_EOF != 0
}
pub fn is_write_closed(event: &Event) -> bool {
event.filter == libc::EVFILT_WRITE && event.flags & libc::EV_EOF != 0
}
    pub fn is_priority(_: &Event) -> bool {
        // kqueue doesn't have priority indicators (no analogue of epoll's
        // `EPOLLPRI`), so this is always false.
        false
    }
    /// AIO completion readiness (`EVFILT_AIO`); only the BSD/Apple targets
    /// listed below expose the filter, everywhere else this is always false.
    #[allow(unused_variables)] // `event` is not used on some platforms.
    pub fn is_aio(event: &Event) -> bool {
        #[cfg(any(
            target_os = "dragonfly",
            target_os = "freebsd",
            target_os = "ios",
            target_os = "macos",
            target_os = "tvos",
            target_os = "watchos",
        ))]
        {
            event.filter == libc::EVFILT_AIO
        }
        #[cfg(not(any(
            target_os = "dragonfly",
            target_os = "freebsd",
            target_os = "ios",
            target_os = "macos",
            target_os = "tvos",
            target_os = "watchos",
        )))]
        {
            false
        }
    }
    /// LIO (list I/O) completion readiness (`EVFILT_LIO`); FreeBSD only.
    #[allow(unused_variables)] // `event` is only used on FreeBSD.
    pub fn is_lio(event: &Event) -> bool {
        #[cfg(target_os = "freebsd")]
        {
            event.filter == libc::EVFILT_LIO
        }
        #[cfg(not(target_os = "freebsd"))]
        {
            false
        }
    }
pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
debug_detail!(
FilterDetails(Filter),
PartialEq::eq,
libc::EVFILT_READ,
libc::EVFILT_WRITE,
libc::EVFILT_AIO,
libc::EVFILT_VNODE,
libc::EVFILT_PROC,
libc::EVFILT_SIGNAL,
libc::EVFILT_TIMER,
#[cfg(target_os = "freebsd")]
libc::EVFILT_PROCDESC,
#[cfg(any(
target_os = "freebsd",
target_os = "dragonfly",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::EVFILT_FS,
#[cfg(target_os = "freebsd")]
libc::EVFILT_LIO,
#[cfg(any(
target_os = "freebsd",
target_os = "dragonfly",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::EVFILT_USER,
#[cfg(target_os = "freebsd")]
libc::EVFILT_SENDFILE,
#[cfg(target_os = "freebsd")]
libc::EVFILT_EMPTY,
#[cfg(target_os = "dragonfly")]
libc::EVFILT_EXCEPT,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::EVFILT_MACHPORT,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::EVFILT_VM,
);
#[allow(clippy::trivially_copy_pass_by_ref)]
fn check_flag(got: &Flags, want: &Flags) -> bool {
(got & want) != 0
}
debug_detail!(
FlagsDetails(Flags),
check_flag,
libc::EV_ADD,
libc::EV_DELETE,
libc::EV_ENABLE,
libc::EV_DISABLE,
libc::EV_ONESHOT,
libc::EV_CLEAR,
libc::EV_RECEIPT,
libc::EV_DISPATCH,
#[cfg(target_os = "freebsd")]
libc::EV_DROP,
libc::EV_FLAG1,
libc::EV_ERROR,
libc::EV_EOF,
libc::EV_SYSFLAGS,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::EV_FLAG0,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::EV_POLL,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::EV_OOBAND,
#[cfg(target_os = "dragonfly")]
libc::EV_NODATA,
);
#[allow(clippy::trivially_copy_pass_by_ref)]
fn check_fflag(got: &u32, want: &u32) -> bool {
(got & want) != 0
}
debug_detail!(
FflagsDetails(u32),
check_fflag,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::NOTE_TRIGGER,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::NOTE_FFNOP,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::NOTE_FFAND,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::NOTE_FFOR,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::NOTE_FFCOPY,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::NOTE_FFCTRLMASK,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
))]
libc::NOTE_FFLAGSMASK,
libc::NOTE_LOWAT,
libc::NOTE_DELETE,
libc::NOTE_WRITE,
#[cfg(target_os = "dragonfly")]
libc::NOTE_OOB,
#[cfg(target_os = "openbsd")]
libc::NOTE_EOF,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_EXTEND,
libc::NOTE_ATTRIB,
libc::NOTE_LINK,
libc::NOTE_RENAME,
libc::NOTE_REVOKE,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_NONE,
#[cfg(any(target_os = "openbsd"))]
libc::NOTE_TRUNCATE,
libc::NOTE_EXIT,
libc::NOTE_FORK,
libc::NOTE_EXEC,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_SIGNAL,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_EXITSTATUS,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_EXIT_DETAIL,
libc::NOTE_PDATAMASK,
libc::NOTE_PCTRLMASK,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
))]
libc::NOTE_TRACK,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
))]
libc::NOTE_TRACKERR,
#[cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
))]
libc::NOTE_CHILD,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_EXIT_DETAIL_MASK,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_EXIT_DECRYPTFAIL,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_EXIT_MEMORY,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_EXIT_CSERROR,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_VM_PRESSURE,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_VM_PRESSURE_TERMINATE,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_VM_PRESSURE_SUDDEN_TERMINATE,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_VM_ERROR,
#[cfg(any(
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_SECONDS,
#[cfg(any(target_os = "freebsd"))]
libc::NOTE_MSECONDS,
#[cfg(any(
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_USECONDS,
#[cfg(any(
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_NSECONDS,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_ABSOLUTE,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_LEEWAY,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_CRITICAL,
#[cfg(any(
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos"
))]
libc::NOTE_BACKGROUND,
);
// Can't reference fields in packed structures.
let ident = event.ident;
let data = event.data;
let udata = event.udata;
f.debug_struct("kevent")
.field("ident", &ident)
.field("filter", &FilterDetails(event.filter))
.field("flags", &FlagsDetails(event.flags))
.field("fflags", &FflagsDetails(event.fflags))
.field("data", &data)
.field("udata", &udata)
.finish()
}
}
#[test]
#[cfg(feature = "os-ext")]
fn does_not_register_rw() {
    use crate::unix::SourceFd;
    use crate::{Poll, Token};
    // Register a raw kqueue fd with a `Poll` using only READABLE interest.
    let kq = unsafe { libc::kqueue() };
    let mut kqf = SourceFd(&kq);
    let poll = Poll::new().unwrap();
    // Registering kqueue fd will fail if write is requested (On anything but
    // some versions of macOS).
    poll.registry()
        .register(&mut kqf, Token(1234), Interest::READABLE)
        .unwrap();
}

View file

@ -0,0 +1,80 @@
#[cfg(all(
not(mio_unsupported_force_poll_poll),
any(
target_os = "android",
target_os = "illumos",
target_os = "linux",
target_os = "redox",
)
))]
mod epoll;
#[cfg(all(
not(mio_unsupported_force_poll_poll),
any(
target_os = "android",
target_os = "illumos",
target_os = "linux",
target_os = "redox",
)
))]
pub(crate) use self::epoll::{event, Event, Events, Selector};
#[cfg(any(
mio_unsupported_force_poll_poll,
target_os = "solaris",
target_os = "vita"
))]
mod poll;
#[cfg(any(
mio_unsupported_force_poll_poll,
target_os = "solaris",
target_os = "vita"
))]
pub(crate) use self::poll::{event, Event, Events, Selector};
cfg_io_source! {
#[cfg(any(mio_unsupported_force_poll_poll, target_os = "solaris", target_os = "vita"))]
pub(crate) use self::poll::IoSourceState;
}
#[cfg(all(
not(mio_unsupported_force_poll_poll),
any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "netbsd",
target_os = "openbsd",
target_os = "tvos",
target_os = "watchos",
)
))]
mod kqueue;
#[cfg(all(
not(mio_unsupported_force_poll_poll),
any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "netbsd",
target_os = "openbsd",
target_os = "tvos",
target_os = "watchos",
),
))]
pub(crate) use self::kqueue::{event, Event, Events, Selector};
/// Lowest file descriptor used in `Selector::try_clone`.
///
/// # Notes
///
/// Usually fds 0, 1 and 2 are standard in, out and error. Some applications
/// blindly assume this to be true, which means using any one of those for a
/// selector could result in some interesting and unexpected errors. Avoid that
/// by using an fd that doesn't have a pre-determined usage.
const LOWEST_FD: libc::c_int = 3;

View file

@ -0,0 +1,726 @@
// This implementation is based on the one in the `polling` crate.
// Thanks to https://github.com/Kestrer for the original implementation!
// Permission to use this code has been granted by original author:
// https://github.com/tokio-rs/mio/pull/1602#issuecomment-1218441031
use crate::sys::unix::selector::LOWEST_FD;
use crate::sys::unix::waker::WakerInternal;
use crate::{Interest, Token};
use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::atomic::AtomicBool;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;
use std::{cmp, fmt, io};
/// Unique id for use as `SelectorId`.
#[cfg(debug_assertions)]
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
/// poll(2)-based selector, used where epoll/kqueue are unavailable or forced
/// off via `mio_unsupported_force_poll_poll`.
#[derive(Debug)]
pub struct Selector {
    /// Shared state; clones of this selector all point at the same state.
    state: Arc<SelectorState>,
}
impl Selector {
    /// Creates a new poll(2)-based selector with fresh shared state.
    pub fn new() -> io::Result<Selector> {
        let state = SelectorState::new()?;
        Ok(Selector {
            state: Arc::new(state),
        })
    }
    /// Clones the selector; both handles share the same `SelectorState`.
    pub fn try_clone(&self) -> io::Result<Selector> {
        // Just to keep the compiler happy :)
        let _ = LOWEST_FD;
        let state = self.state.clone();
        Ok(Selector { state })
    }
    /// Waits for events; delegates to the shared state.
    pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
        self.state.select(events, timeout)
    }
    pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
        self.state.register(fd, token, interests)
    }
    /// Like `register`, but also returns the `RegistrationRecord` so callers
    /// can observe internal deregistration (e.g. after POLLHUP/POLLERR).
    #[allow(dead_code)]
    pub(crate) fn register_internal(
        &self,
        fd: RawFd,
        token: Token,
        interests: Interest,
    ) -> io::Result<Arc<RegistrationRecord>> {
        self.state.register_internal(fd, token, interests)
    }
    pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
        self.state.reregister(fd, token, interests)
    }
    pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
        self.state.deregister(fd)
    }
    /// Wakes a blocked `select` call, attributing the wakeup to `token`.
    pub fn wake(&self, token: Token) -> io::Result<()> {
        self.state.wake(token)
    }
    cfg_io_source! {
        #[cfg(debug_assertions)]
        pub fn id(&self) -> usize {
            self.state.id
        }
    }
}
/// Interface to poll.
#[derive(Debug)]
struct SelectorState {
    /// File descriptors to poll.
    fds: Mutex<Fds>,
    /// File descriptors which will be removed before the next poll call.
    ///
    /// When a file descriptor is deregistered while a poll is running, we need to filter
    /// out all removed descriptors after that poll is finished running.
    pending_removal: Mutex<Vec<RawFd>>,
    /// Token associated with Waker that have recently asked to wake. This will
    /// cause a synthetic behaviour where on any wakeup we add all pending tokens
    /// to the list of emitted events.
    pending_wake_token: Mutex<Option<Token>>,
    /// Data is written to this to wake up the current instance of `wait`, which can occur when the
    /// user notifies it (in which case `notified` would have been set) or when an operation needs
    /// to occur (in which case `waiting_operations` would have been incremented).
    notify_waker: WakerInternal,
    /// The number of operations (`add`, `modify` or `delete`) that are currently waiting on the
    /// mutex to become free. When this is nonzero, `wait` must be suspended until it reaches zero
    /// again.
    waiting_operations: AtomicUsize,
    /// The condition variable that gets notified when `waiting_operations` reaches zero or
    /// `notified` becomes true.
    ///
    /// This is used with the `fds` mutex.
    operations_complete: Condvar,
    /// This selector's id.
    #[cfg(debug_assertions)]
    #[allow(dead_code)]
    id: usize,
}
/// The file descriptors to poll in a `Poller`.
#[derive(Debug, Clone)]
struct Fds {
    /// The list of `pollfds` taken by poll.
    ///
    /// The first file descriptor is always present and is used to notify the poller.
    /// (Slot 0 is installed by `SelectorState::new` and never removed.)
    poll_fds: Vec<PollFd>,
    /// The map of each file descriptor to data associated with it. This does not include the file
    /// descriptors created by the internal notify waker.
    fd_data: HashMap<RawFd, FdData>,
}
/// Transparent wrapper around `libc::pollfd`, used to support `Debug` derives without adding the
/// `extra_traits` feature of `libc`.
///
/// `repr(transparent)` guarantees identical layout to `libc::pollfd`, so a
/// slice of `PollFd` can be handed straight to poll(2).
#[repr(transparent)]
#[derive(Clone)]
struct PollFd(libc::pollfd);
impl Debug for PollFd {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("pollfd")
.field("fd", &self.0.fd)
.field("events", &self.0.events)
.field("revents", &self.0.revents)
.finish()
}
}
/// Data associated with a file descriptor in a poller.
#[derive(Debug, Clone)]
struct FdData {
    /// The index into `poll_fds` this file descriptor is.
    poll_fds_index: usize,
    /// The key of the `Event` associated with this file descriptor.
    token: Token,
    /// Used to communicate with IoSourceState when we need to internally deregister
    /// based on a closed fd (POLLHUP/POLLERR observed during `select`).
    shared_record: Arc<RegistrationRecord>,
}
impl SelectorState {
    /// Creates the shared state with the internal waker installed as the
    /// first (permanent) entry in `poll_fds`.
    pub fn new() -> io::Result<SelectorState> {
        let notify_waker = WakerInternal::new()?;
        Ok(Self {
            fds: Mutex::new(Fds {
                // Slot 0 is reserved for the waker fd and is never removed.
                poll_fds: vec![PollFd(libc::pollfd {
                    fd: notify_waker.as_raw_fd(),
                    events: libc::POLLIN,
                    revents: 0,
                })],
                fd_data: HashMap::new(),
            }),
            pending_removal: Mutex::new(Vec::new()),
            pending_wake_token: Mutex::new(None),
            notify_waker,
            waiting_operations: AtomicUsize::new(0),
            operations_complete: Condvar::new(),
            #[cfg(debug_assertions)]
            id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
        })
    }
pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
events.clear();
let mut fds = self.fds.lock().unwrap();
// Keep track of fds that receive POLLHUP or POLLERR (i.e. won't receive further
// events) and internally deregister them before they are externally deregister'd. See
// IoSourceState below to track how the external deregister call will be handled
// when this state occurs.
let mut closed_raw_fds = Vec::new();
loop {
// Complete all current operations.
loop {
if self.waiting_operations.load(Ordering::SeqCst) == 0 {
break;
}
fds = self.operations_complete.wait(fds).unwrap();
}
// Perform the poll.
trace!("Polling on {:?}", &fds);
let num_events = poll(&mut fds.poll_fds, timeout)?;
trace!("Poll finished: {:?}", &fds);
if num_events == 0 {
return Ok(());
}
let waker_events = fds.poll_fds[0].0.revents;
let notified = waker_events != 0;
let mut num_fd_events = if notified { num_events - 1 } else { num_events };
let pending_wake_token = self.pending_wake_token.lock().unwrap().take();
if notified {
self.notify_waker.ack_and_reset();
if pending_wake_token.is_some() {
num_fd_events += 1;
}
}
// We now check whether this poll was performed with descriptors which were pending
// for removal and filter out any matching.
let mut pending_removal_guard = self.pending_removal.lock().unwrap();
let mut pending_removal = std::mem::replace(pending_removal_guard.as_mut(), Vec::new());
drop(pending_removal_guard);
// Store the events if there were any.
if num_fd_events > 0 {
let fds = &mut *fds;
events.reserve(num_fd_events);
// Add synthetic events we picked up from calls to wake()
if let Some(pending_wake_token) = pending_wake_token {
events.push(Event {
token: pending_wake_token,
events: waker_events,
});
}
for fd_data in fds.fd_data.values_mut() {
let PollFd(poll_fd) = &mut fds.poll_fds[fd_data.poll_fds_index];
if pending_removal.contains(&poll_fd.fd) {
// Fd was removed while poll was running
continue;
}
if poll_fd.revents != 0 {
// Store event
events.push(Event {
token: fd_data.token,
events: poll_fd.revents,
});
if poll_fd.revents & (libc::POLLHUP | libc::POLLERR) != 0 {
pending_removal.push(poll_fd.fd);
closed_raw_fds.push(poll_fd.fd);
}
// Remove the interest which just got triggered
// the IoSourceState/WakerRegistrar used with this selector will add back
// the interest using reregister.
poll_fd.events &= !poll_fd.revents;
// Minor optimization to potentially avoid looping n times where n is the
// number of input fds (i.e. we might loop between m and n times where m is
// the number of fds with revents != 0).
if events.len() == num_fd_events {
break;
}
}
}
break; // No more polling.
}
// If we didn't break above it means we got woken up internally (for example for adding an fd), so we poll again.
}
drop(fds);
let _ = self.deregister_all(&closed_raw_fds);
Ok(())
}
pub fn register(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
self.register_internal(fd, token, interests).map(|_| ())
}
pub fn register_internal(
&self,
fd: RawFd,
token: Token,
interests: Interest,
) -> io::Result<Arc<RegistrationRecord>> {
#[cfg(debug_assertions)]
if fd == self.notify_waker.as_raw_fd() {
return Err(io::Error::from(io::ErrorKind::InvalidInput));
}
// We must handle the unlikely case that the following order of operations happens:
//
// register(1 as RawFd)
// deregister(1 as RawFd)
// register(1 as RawFd)
// <poll happens>
//
// Fd's pending removal only get cleared when poll has been run. It is possible that
// between registering and deregistering and then _again_ registering the file descriptor
// poll never gets called, thus the fd stays stuck in the pending removal list.
//
// To avoid this scenario we remove an fd from pending removals when registering it.
let mut pending_removal = self.pending_removal.lock().unwrap();
if let Some(idx) = pending_removal.iter().position(|&pending| pending == fd) {
pending_removal.swap_remove(idx);
}
drop(pending_removal);
self.modify_fds(|fds| {
if fds.fd_data.contains_key(&fd) {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"I/O source already registered this `Registry` \
(an old file descriptor might have been closed without deregistration)",
));
}
let poll_fds_index = fds.poll_fds.len();
let record = Arc::new(RegistrationRecord::new());
fds.fd_data.insert(
fd,
FdData {
poll_fds_index,
token,
shared_record: record.clone(),
},
);
fds.poll_fds.push(PollFd(libc::pollfd {
fd,
events: interests_to_poll(interests),
revents: 0,
}));
Ok(record)
})
}
pub fn reregister(&self, fd: RawFd, token: Token, interests: Interest) -> io::Result<()> {
self.modify_fds(|fds| {
let data = fds.fd_data.get_mut(&fd).ok_or(io::ErrorKind::NotFound)?;
data.token = token;
let poll_fds_index = data.poll_fds_index;
fds.poll_fds[poll_fds_index].0.events = interests_to_poll(interests);
Ok(())
})
}
pub fn deregister(&self, fd: RawFd) -> io::Result<()> {
self.deregister_all(&[fd])
.map_err(|_| io::ErrorKind::NotFound)?;
Ok(())
}
/// Perform a modification on `fds`, interrupting the current caller of `wait` if it's running.
fn modify_fds<T>(&self, f: impl FnOnce(&mut Fds) -> T) -> T {
self.waiting_operations.fetch_add(1, Ordering::SeqCst);
// Wake up the current caller of `wait` if there is one.
let sent_notification = self.notify_waker.wake().is_ok();
let mut fds = self.fds.lock().unwrap();
// If there was no caller of `wait` our notification was not removed from the pipe.
if sent_notification {
self.notify_waker.ack_and_reset();
}
let res = f(&mut *fds);
if self.waiting_operations.fetch_sub(1, Ordering::SeqCst) == 1 {
self.operations_complete.notify_one();
}
res
}
/// Special optimized version of [Self::deregister] which handles multiple removals
/// at once. Ok result if all removals were performed, Err if any entries
/// were not found.
fn deregister_all(&self, targets: &[RawFd]) -> Result<(), ()> {
if targets.is_empty() {
return Ok(());
}
let mut pending_removal = self.pending_removal.lock().unwrap();
pending_removal.extend(targets);
drop(pending_removal);
self.modify_fds(|fds| {
let mut all_successful = true;
for target in targets {
match fds.fd_data.remove(target).ok_or(()) {
Ok(data) => {
data.shared_record.mark_unregistered();
fds.poll_fds.swap_remove(data.poll_fds_index);
if let Some(swapped_pollfd) = fds.poll_fds.get(data.poll_fds_index) {
fds.fd_data
.get_mut(&swapped_pollfd.0.fd)
.unwrap()
.poll_fds_index = data.poll_fds_index;
}
}
Err(_) => all_successful = false,
}
}
if all_successful {
Ok(())
} else {
Err(())
}
})
}
pub fn wake(&self, token: Token) -> io::Result<()> {
self.pending_wake_token.lock().unwrap().replace(token);
self.notify_waker.wake()
}
}
/// Shared record between IoSourceState and SelectorState that allows us to internally
/// deregister partially or fully closed fds (i.e. when we get POLLHUP or POLLERR) without
/// confusing IoSourceState and trying to deregister twice. This isn't strictly
/// required as technically deregister is idempotent but it is confusing
/// when trying to debug behaviour as we get imbalanced calls to register/deregister and
/// superfluous NotFound errors.
#[derive(Debug)]
pub(crate) struct RegistrationRecord {
    // `true` once the fd has been removed from the selector (internally or
    // externally); checked by `InternalState::drop` to avoid a second call.
    is_unregistered: AtomicBool,
}
impl RegistrationRecord {
pub fn new() -> Self {
Self {
is_unregistered: AtomicBool::new(false),
}
}
pub fn mark_unregistered(&self) {
self.is_unregistered.store(true, Ordering::Relaxed);
}
#[allow(dead_code)]
pub fn is_registered(&self) -> bool {
!self.is_unregistered.load(Ordering::Relaxed)
}
}
// `POLLRDHUP` is only exposed by libc on Linux here; on other targets the
// constant is 0 so OR-ing/AND-ing with it is a no-op.
#[cfg(target_os = "linux")]
const POLLRDHUP: libc::c_short = libc::POLLRDHUP;
#[cfg(not(target_os = "linux"))]
const POLLRDHUP: libc::c_short = 0;
// Input event masks corresponding to the three `Interest` kinds.
const READ_EVENTS: libc::c_short = libc::POLLIN | POLLRDHUP;
const WRITE_EVENTS: libc::c_short = libc::POLLOUT;
const PRIORITY_EVENTS: libc::c_short = libc::POLLPRI;
/// Get the input poll events for the given event.
///
/// Maps each requested `Interest` onto its `poll(2)` event mask and ORs the
/// selected masks together.
fn interests_to_poll(interest: Interest) -> libc::c_short {
    let selections: [(bool, libc::c_short); 3] = [
        (interest.is_readable(), READ_EVENTS),
        (interest.is_writable(), WRITE_EVENTS),
        (interest.is_priority(), PRIORITY_EVENTS),
    ];
    selections
        .iter()
        .filter(|(wanted, _)| *wanted)
        .fold(0, |mask, (_, bits)| mask | bits)
}
/// Helper function to call poll.
///
/// Clamps `timeout` into a safe `c_int` millisecond value (`-1` = infinite)
/// and retries the syscall on `EAGAIN`. Returns the number of fds with
/// non-zero `revents`.
fn poll(fds: &mut [PollFd], timeout: Option<Duration>) -> io::Result<usize> {
    loop {
        // A bug in kernels < 2.6.37 makes timeouts larger than LONG_MAX / CONFIG_HZ
        // (approx. 30 minutes with CONFIG_HZ=1200) effectively infinite on 32 bits
        // architectures. The magic number is the same constant used by libuv.
        #[cfg(target_pointer_width = "32")]
        const MAX_SAFE_TIMEOUT: u128 = 1789569;
        #[cfg(not(target_pointer_width = "32"))]
        const MAX_SAFE_TIMEOUT: u128 = libc::c_int::max_value() as u128;
        let timeout = timeout
            .map(|to| {
                // `Duration::as_millis` truncates, so round up. This avoids
                // turning sub-millisecond timeouts into a zero timeout, unless
                // the caller explicitly requests that by specifying a zero
                // timeout.
                let to_ms = to
                    .checked_add(Duration::from_nanos(999_999))
                    .unwrap_or(to)
                    .as_millis();
                cmp::min(MAX_SAFE_TIMEOUT, to_ms) as libc::c_int
            })
            .unwrap_or(-1);
        // SAFETY of the cast: `PollFd` is `repr(transparent)` over `libc::pollfd`.
        let res = syscall!(poll(
            fds.as_mut_ptr() as *mut libc::pollfd,
            fds.len() as libc::nfds_t,
            timeout,
        ));
        match res {
            Ok(num_events) => break Ok(num_events as usize),
            // poll returns EAGAIN if we can retry it.
            Err(e) if e.raw_os_error() == Some(libc::EAGAIN) => continue,
            Err(e) => return Err(e),
        }
    }
}
/// A single readiness event: the registered `Token` plus the raw `poll(2)`
/// `revents` bits that fired for the fd.
#[derive(Debug, Clone)]
pub struct Event {
    token: Token,
    events: libc::c_short,
}
/// Collection of events filled in by `SelectorState::select`.
pub type Events = Vec<Event>;
pub mod event {
    use crate::sys::unix::selector::poll::POLLRDHUP;
    use crate::sys::Event;
    use crate::Token;
    use std::fmt;
    /// Returns the token the event was registered with.
    pub fn token(event: &Event) -> Token {
        event.token
    }
    /// Readable readiness: normal data (`POLLIN`) or priority data (`POLLPRI`).
    pub fn is_readable(event: &Event) -> bool {
        (event.events & libc::POLLIN) != 0 || (event.events & libc::POLLPRI) != 0
    }
    /// Writable readiness (`POLLOUT`).
    pub fn is_writable(event: &Event) -> bool {
        (event.events & libc::POLLOUT) != 0
    }
    /// Error readiness (`POLLERR`).
    pub fn is_error(event: &Event) -> bool {
        (event.events & libc::POLLERR) != 0
    }
    /// Read-closed readiness (`POLLHUP` or, on Linux, `POLLRDHUP`).
    pub fn is_read_closed(event: &Event) -> bool {
        // Both halves of the socket have closed
        (event.events & libc::POLLHUP) != 0
        // Socket has received FIN or called shutdown(SHUT_RD)
        || (event.events & POLLRDHUP) != 0
    }
    /// Write-closed readiness, detected via several flag combinations.
    pub fn is_write_closed(event: &Event) -> bool {
        // Both halves of the socket have closed
        (event.events & libc::POLLHUP) != 0
        // Unix pipe write end has closed
        || ((event.events & libc::POLLOUT) != 0 && (event.events & libc::POLLERR) != 0)
        // The other side (read end) of a Unix pipe has closed.
        // Note: exact equality on purpose — POLLERR with no other bit set.
        || (event.events == libc::POLLERR)
    }
    /// Priority readiness (`POLLPRI`).
    pub fn is_priority(event: &Event) -> bool {
        (event.events & libc::POLLPRI) != 0
    }
    pub fn is_aio(_: &Event) -> bool {
        // Not supported in the kernel, only in libc.
        false
    }
    pub fn is_lio(_: &Event) -> bool {
        // Not supported.
        false
    }
    /// Formats an event's raw flag bits for `Debug` output.
    pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
        #[allow(clippy::trivially_copy_pass_by_ref)]
        fn check_events(got: &libc::c_short, want: &libc::c_short) -> bool {
            (*got & want) != 0
        }
        debug_detail!(
            EventsDetails(libc::c_short),
            check_events,
            libc::POLLIN,
            libc::POLLPRI,
            libc::POLLOUT,
            libc::POLLRDNORM,
            libc::POLLRDBAND,
            libc::POLLWRNORM,
            libc::POLLWRBAND,
            libc::POLLERR,
            libc::POLLHUP,
        );
        f.debug_struct("poll_event")
            .field("token", &event.token)
            .field("events", &EventsDetails(event.events))
            .finish()
    }
}
cfg_io_source! {
    use crate::Registry;
    /// Per-source registration state held while the source is registered.
    struct InternalState {
        selector: Selector,
        token: Token,
        interests: Interest,
        fd: RawFd,
        shared_record: Arc<RegistrationRecord>,
    }
    impl Drop for InternalState {
        fn drop(&mut self) {
            // Only deregister if the selector has not already internally
            // deregistered this fd (see `RegistrationRecord`).
            if self.shared_record.is_registered() {
                let _ = self.selector.deregister(self.fd);
            }
        }
    }
    /// Poll-selector backing state for an `IoSource`.
    pub(crate) struct IoSourceState {
        // `None` until `register` is called.
        inner: Option<Box<InternalState>>,
    }
    impl IoSourceState {
        pub fn new() -> IoSourceState {
            IoSourceState { inner: None }
        }
        /// Runs the I/O closure; on `WouldBlock` re-arms the interest via
        /// `reregister`, since `select` strips triggered interests (oneshot).
        pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
        where
            F: FnOnce(&T) -> io::Result<R>,
        {
            let result = f(io);
            if let Err(err) = &result {
                if err.kind() == io::ErrorKind::WouldBlock {
                    self.inner.as_ref().map_or(Ok(()), |state| {
                        state
                            .selector
                            .reregister(state.fd, state.token, state.interests)
                    })?;
                }
            }
            result
        }
        /// Registers `fd` with the registry's selector, storing the state
        /// needed for automatic re-arming and deregister-on-drop.
        pub fn register(
            &mut self,
            registry: &Registry,
            token: Token,
            interests: Interest,
            fd: RawFd,
        ) -> io::Result<()> {
            if self.inner.is_some() {
                Err(io::ErrorKind::AlreadyExists.into())
            } else {
                let selector = registry.selector().try_clone()?;
                selector.register_internal(fd, token, interests).map(move |shared_record| {
                    let state = InternalState {
                        selector,
                        token,
                        interests,
                        fd,
                        shared_record,
                    };
                    self.inner = Some(Box::new(state));
                })
            }
        }
        /// Updates token/interests; errors with `NotFound` if never registered.
        pub fn reregister(
            &mut self,
            registry: &Registry,
            token: Token,
            interests: Interest,
            fd: RawFd,
        ) -> io::Result<()> {
            match self.inner.as_mut() {
                Some(state) => registry
                    .selector()
                    .reregister(fd, token, interests)
                    .map(|()| {
                        state.token = token;
                        state.interests = interests;
                    }),
                None => Err(io::ErrorKind::NotFound.into()),
            }
        }
        /// Deregisters `fd`, dropping the stored state.
        pub fn deregister(&mut self, registry: &Registry, fd: RawFd) -> io::Result<()> {
            if let Some(state) = self.inner.take() {
                // Marking unregistered will short circuit the drop behaviour of calling
                // deregister so the call to deregister below is strictly required.
                state.shared_record.mark_unregistered();
            }
            registry.selector().deregister(fd)
        }
    }
}

View file

@ -0,0 +1,116 @@
use crate::{event, Interest, Registry, Token};
use std::io;
use std::os::unix::io::RawFd;
/// Adapter for [`RawFd`] providing an [`event::Source`] implementation.
///
/// `SourceFd` enables registering any type with an FD with [`Poll`].
///
/// While only implementations for TCP and UDP are provided, Mio supports
/// registering any FD that can be registered with the underlying OS selector.
/// `SourceFd` provides the necessary bridge.
///
/// Note that `SourceFd` takes a `&RawFd`. This is because `SourceFd` **does
/// not** take ownership of the FD. Specifically, it will not manage any
/// lifecycle related operations, such as closing the FD on drop. It is expected
/// that the `SourceFd` is constructed right before a call to
/// [`Registry::register`]. See the examples for more detail.
///
/// [`event::Source`]: ../event/trait.Source.html
/// [`Poll`]: ../struct.Poll.html
/// [`Registry::register`]: ../struct.Registry.html#method.register
///
/// # Examples
///
/// Basic usage.
///
#[cfg_attr(
all(feature = "os-poll", feature = "net", feature = "os-ext"),
doc = "```"
)]
#[cfg_attr(
not(all(feature = "os-poll", feature = "net", feature = "os-ext")),
doc = "```ignore"
)]
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use mio::{Interest, Poll, Token};
/// use mio::unix::SourceFd;
///
/// use std::os::unix::io::AsRawFd;
/// use std::net::TcpListener;
///
/// // Bind a std listener
/// let listener = TcpListener::bind("127.0.0.1:0")?;
///
/// let poll = Poll::new()?;
///
/// // Register the listener
/// poll.registry().register(
/// &mut SourceFd(&listener.as_raw_fd()),
/// Token(0),
/// Interest::READABLE)?;
/// # Ok(())
/// # }
/// ```
///
/// Implementing [`event::Source`] for a custom type backed by a [`RawFd`].
///
#[cfg_attr(all(feature = "os-poll", feature = "os-ext"), doc = "```")]
#[cfg_attr(not(all(feature = "os-poll", feature = "os-ext")), doc = "```ignore")]
/// use mio::{event, Interest, Registry, Token};
/// use mio::unix::SourceFd;
///
/// use std::os::unix::io::RawFd;
/// use std::io;
///
/// # #[allow(dead_code)]
/// pub struct MyIo {
/// fd: RawFd,
/// }
///
/// impl event::Source for MyIo {
/// fn register(&mut self, registry: &Registry, token: Token, interests: Interest)
/// -> io::Result<()>
/// {
/// SourceFd(&self.fd).register(registry, token, interests)
/// }
///
/// fn reregister(&mut self, registry: &Registry, token: Token, interests: Interest)
/// -> io::Result<()>
/// {
/// SourceFd(&self.fd).reregister(registry, token, interests)
/// }
///
/// fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
/// SourceFd(&self.fd).deregister(registry)
/// }
/// }
/// ```
#[derive(Debug)]
pub struct SourceFd<'a>(pub &'a RawFd);
impl<'a> event::Source for SourceFd<'a> {
    /// Registers the borrowed raw fd directly with the registry's selector.
    fn register(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        registry.selector().register(*self.0, token, interests)
    }
    /// Updates the token/interests of the already-registered raw fd.
    fn reregister(
        &mut self,
        registry: &Registry,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        registry.selector().reregister(*self.0, token, interests)
    }
    /// Removes the raw fd from the registry's selector.
    fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
        registry.selector().deregister(*self.0)
    }
}

View file

@ -0,0 +1,122 @@
use std::convert::TryInto;
use std::io;
use std::mem::{size_of, MaybeUninit};
use std::net::{self, SocketAddr};
use std::os::unix::io::{AsRawFd, FromRawFd};
use crate::sys::unix::net::{new_socket, socket_addr, to_socket_addr};
/// Creates a new non-blocking stream socket whose address family matches the
/// IP version of `address`, returning the raw fd.
pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<libc::c_int> {
    // `SocketAddr` is either V4 or V6, so a boolean check suffices.
    let domain = if address.is_ipv4() {
        libc::AF_INET
    } else {
        libc::AF_INET6
    };
    new_socket(domain, libc::SOCK_STREAM)
}
/// Binds `socket` to `addr` via `bind(2)`.
pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
    let (raw_addr, raw_addr_length) = socket_addr(&addr);
    syscall!(bind(socket.as_raw_fd(), raw_addr.as_ptr(), raw_addr_length))?;
    Ok(())
}
/// Starts a non-blocking `connect(2)` to `addr`.
///
/// `EINPROGRESS` is the expected result for a non-blocking socket and is
/// treated as success; completion is reported via writable readiness.
pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
    let (raw_addr, raw_addr_length) = socket_addr(&addr);
    match syscall!(connect(
        socket.as_raw_fd(),
        raw_addr.as_ptr(),
        raw_addr_length
    )) {
        Err(err) if err.raw_os_error() != Some(libc::EINPROGRESS) => Err(err),
        _ => Ok(()),
    }
}
/// Starts listening on `socket`, clamping `backlog` to `i32::MAX`.
pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
    let backlog = backlog.try_into().unwrap_or(i32::max_value());
    syscall!(listen(socket.as_raw_fd(), backlog))?;
    Ok(())
}
/// Sets the `SO_REUSEADDR` socket option on `socket`.
pub(crate) fn set_reuseaddr(socket: &net::TcpListener, reuseaddr: bool) -> io::Result<()> {
    let val: libc::c_int = i32::from(reuseaddr);
    syscall!(setsockopt(
        socket.as_raw_fd(),
        libc::SOL_SOCKET,
        libc::SO_REUSEADDR,
        &val as *const libc::c_int as *const libc::c_void,
        size_of::<libc::c_int>() as libc::socklen_t,
    ))?;
    Ok(())
}
/// Accepts a connection on `listener`, returning the stream (with `NONBLOCK`
/// and `CLOEXEC` set where supported) and the peer address.
pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
    let mut addr: MaybeUninit<libc::sockaddr_storage> = MaybeUninit::uninit();
    let mut length = size_of::<libc::sockaddr_storage>() as libc::socklen_t;
    // On platforms that support it we can use `accept4(2)` to set `NONBLOCK`
    // and `CLOEXEC` in the call to accept the connection.
    #[cfg(any(
        // Android x86's seccomp profile forbids calls to `accept4(2)`
        // See https://github.com/tokio-rs/mio/issues/1445 for details
        all(not(target_arch="x86"), target_os = "android"),
        target_os = "dragonfly",
        target_os = "freebsd",
        target_os = "illumos",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
    ))]
    let stream = {
        syscall!(accept4(
            listener.as_raw_fd(),
            addr.as_mut_ptr() as *mut _,
            &mut length,
            libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK,
        ))
        .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) })
    }?;
    // But not all platforms have the `accept4(2)` call. Luckily BSD (derived)
    // OSes inherit the non-blocking flag from the listener, so we just have to
    // set `CLOEXEC`.
    #[cfg(any(
        target_os = "aix",
        target_os = "ios",
        target_os = "macos",
        target_os = "redox",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "espidf",
        target_os = "vita",
        all(target_arch = "x86", target_os = "android"),
    ))]
    let stream = {
        syscall!(accept(
            listener.as_raw_fd(),
            addr.as_mut_ptr() as *mut _,
            &mut length
        ))
        // Wrapping in `TcpStream` first ensures the fd is closed if the
        // `fcntl` calls below fail.
        .map(|socket| unsafe { net::TcpStream::from_raw_fd(socket) })
        .and_then(|s| {
            #[cfg(not(any(target_os = "espidf", target_os = "vita")))]
            syscall!(fcntl(s.as_raw_fd(), libc::F_SETFD, libc::FD_CLOEXEC))?;
            // See https://github.com/tokio-rs/mio/issues/1450
            #[cfg(any(
                all(target_arch = "x86", target_os = "android"),
                target_os = "espidf",
                target_os = "vita",
            ))]
            syscall!(fcntl(s.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK))?;
            Ok(s)
        })
    }?;
    // This is safe because `accept` calls above ensures the address
    // initialised.
    unsafe { to_socket_addr(addr.as_ptr()) }.map(|addr| (stream, addr))
}

View file

@ -0,0 +1,31 @@
use crate::sys::unix::net::{new_ip_socket, socket_addr};
use std::io;
use std::mem;
use std::net::{self, SocketAddr};
use std::os::unix::io::{AsRawFd, FromRawFd};
/// Creates a new non-blocking UDP socket and binds it to `addr`.
pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
    let fd = new_ip_socket(addr, libc::SOCK_DGRAM)?;
    // Wrap the fd immediately so it is closed if `bind` below fails.
    let socket = unsafe { net::UdpSocket::from_raw_fd(fd) };
    let (raw_addr, raw_addr_length) = socket_addr(&addr);
    syscall!(bind(fd, raw_addr.as_ptr(), raw_addr_length))?;
    Ok(socket)
}
/// Reads the `IPV6_V6ONLY` socket option of `socket`.
pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
    let mut optval: libc::c_int = 0;
    let mut optlen = mem::size_of::<libc::c_int>() as libc::socklen_t;
    syscall!(getsockopt(
        socket.as_raw_fd(),
        libc::IPPROTO_IPV6,
        libc::IPV6_V6ONLY,
        &mut optval as *mut _ as *mut _,
        &mut optlen,
    ))?;
    Ok(optval != 0)
}

View file

@ -0,0 +1,57 @@
use super::{socket_addr, SocketAddr};
use crate::sys::unix::net::new_socket;
use std::io;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::os::unix::net;
use std::path::Path;
/// Creates a Unix datagram socket bound to `path`.
pub(crate) fn bind(path: &Path) -> io::Result<net::UnixDatagram> {
    let (sockaddr, socklen) = socket_addr(path.as_os_str().as_bytes())?;
    let sockaddr = &sockaddr as *const libc::sockaddr_un as *const _;
    // Create the socket first so it is closed on a `bind` failure.
    let socket = unbound()?;
    syscall!(bind(socket.as_raw_fd(), sockaddr, socklen))?;
    Ok(socket)
}
/// Creates an unbound non-blocking Unix datagram socket.
pub(crate) fn unbound() -> io::Result<net::UnixDatagram> {
    let fd = new_socket(libc::AF_UNIX, libc::SOCK_DGRAM)?;
    Ok(unsafe { net::UnixDatagram::from_raw_fd(fd) })
}
/// Creates a pair of connected Unix datagram sockets.
pub(crate) fn pair() -> io::Result<(net::UnixDatagram, net::UnixDatagram)> {
    super::pair(libc::SOCK_DGRAM)
}
/// Returns the local address of `socket`.
pub(crate) fn local_addr(socket: &net::UnixDatagram) -> io::Result<SocketAddr> {
    super::local_addr(socket.as_raw_fd())
}
/// Returns the peer address of `socket`.
pub(crate) fn peer_addr(socket: &net::UnixDatagram) -> io::Result<SocketAddr> {
    super::peer_addr(socket.as_raw_fd())
}
/// Receives a datagram into `dst`, returning the byte count and sender address.
pub(crate) fn recv_from(
    socket: &net::UnixDatagram,
    dst: &mut [u8],
) -> io::Result<(usize, SocketAddr)> {
    let mut count = 0;
    // `SocketAddr::new` supplies the sockaddr/socklen out-pointers; the byte
    // count is smuggled out of the closure via `count`.
    let socketaddr = SocketAddr::new(|sockaddr, socklen| {
        syscall!(recvfrom(
            socket.as_raw_fd(),
            dst.as_mut_ptr() as *mut _,
            dst.len(),
            0,
            sockaddr,
            socklen,
        ))
        .map(|c| {
            count = c;
            c as libc::c_int
        })
    })?;
    Ok((count as usize, socketaddr))
}

View file

@ -0,0 +1,113 @@
use super::socket_addr;
use crate::net::{SocketAddr, UnixStream};
use crate::sys::unix::net::new_socket;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::os::unix::net;
use std::path::Path;
use std::{io, mem};
/// Creates a Unix listener bound to `path`.
pub(crate) fn bind(path: &Path) -> io::Result<net::UnixListener> {
    let socket_address = {
        let (sockaddr, socklen) = socket_addr(path.as_os_str().as_bytes())?;
        SocketAddr::from_parts(sockaddr, socklen)
    };
    bind_addr(&socket_address)
}
/// Creates a Unix listener bound to `address` with a backlog of 1024.
pub(crate) fn bind_addr(address: &SocketAddr) -> io::Result<net::UnixListener> {
    let fd = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?;
    // Wrap the fd immediately so it is closed if `bind`/`listen` below fail.
    let socket = unsafe { net::UnixListener::from_raw_fd(fd) };
    let sockaddr = address.raw_sockaddr() as *const libc::sockaddr_un as *const libc::sockaddr;
    syscall!(bind(fd, sockaddr, *address.raw_socklen()))?;
    syscall!(listen(fd, 1024))?;
    Ok(socket)
}
/// Accepts a connection on `listener`, returning the stream (with `NONBLOCK`
/// and `CLOEXEC` set where supported) and the peer address.
pub(crate) fn accept(listener: &net::UnixListener) -> io::Result<(UnixStream, SocketAddr)> {
    let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
    // This is safe to assume because a `libc::sockaddr_un` filled with `0`
    // bytes is properly initialized.
    //
    // `0` is a valid value for `sockaddr_un::sun_family`; it is
    // `libc::AF_UNSPEC`.
    //
    // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an
    // abstract path.
    let mut sockaddr = unsafe { sockaddr.assume_init() };
    sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t;
    let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t;
    #[cfg(not(any(
        target_os = "aix",
        target_os = "ios",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "redox",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "espidf",
        target_os = "vita",
        // Android x86's seccomp profile forbids calls to `accept4(2)`
        // See https://github.com/tokio-rs/mio/issues/1445 for details
        all(target_arch = "x86", target_os = "android"),
    )))]
    let socket = {
        let flags = libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
        syscall!(accept4(
            listener.as_raw_fd(),
            &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr,
            &mut socklen,
            flags
        ))
        .map(|socket| unsafe { net::UnixStream::from_raw_fd(socket) })
    };
    #[cfg(any(
        target_os = "aix",
        target_os = "ios",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "redox",
        target_os = "tvos",
        target_os = "watchos",
        target_os = "espidf",
        target_os = "vita",
        all(target_arch = "x86", target_os = "android")
    ))]
    let socket = syscall!(accept(
        listener.as_raw_fd(),
        &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr,
        &mut socklen,
    ))
    .and_then(|socket| {
        // Ensure the socket is closed if either of the `fcntl` calls
        // error below.
        let s = unsafe { net::UnixStream::from_raw_fd(socket) };
        #[cfg(not(any(target_os = "espidf", target_os = "vita")))]
        syscall!(fcntl(socket, libc::F_SETFD, libc::FD_CLOEXEC))?;
        // See https://github.com/tokio-rs/mio/issues/1450
        #[cfg(any(
            all(target_arch = "x86", target_os = "android"),
            target_os = "espidf",
            target_os = "vita",
        ))]
        syscall!(fcntl(socket, libc::F_SETFL, libc::O_NONBLOCK))?;
        Ok(s)
    });
    socket
        .map(UnixStream::from_std)
        .map(|stream| (stream, SocketAddr::from_parts(sockaddr, socklen)))
}
/// Returns the local address of `listener`.
pub(crate) fn local_addr(listener: &net::UnixListener) -> io::Result<SocketAddr> {
    super::local_addr(listener.as_raw_fd())
}

View file

@ -0,0 +1,164 @@
mod socketaddr;
pub use self::socketaddr::SocketAddr;
/// Get the `sun_path` field offset of `sockaddr_un` for the target OS.
///
/// On Linux, this function equates to the same value as
/// `size_of::<sa_family_t>()`, but some other implementations include
/// other fields before `sun_path`, so the expression more portably
/// describes the size of the address structure.
pub(in crate::sys) fn path_offset(sockaddr: &libc::sockaddr_un) -> usize {
    // Subtract the struct's base address from the `sun_path` field's address
    // to obtain the field's byte offset within the struct.
    let struct_start = sockaddr as *const libc::sockaddr_un as usize;
    let field_start = &sockaddr.sun_path as *const _ as usize;
    field_start - struct_start
}
cfg_os_poll! {
    use std::cmp::Ordering;
    use std::os::unix::io::{RawFd, FromRawFd};
    use std::{io, mem};
    pub(crate) mod datagram;
    pub(crate) mod listener;
    pub(crate) mod stream;
    /// Converts a path (as bytes) into a `sockaddr_un` plus the matching
    /// `socklen`, supporting both pathname and abstract (leading NUL) addresses.
    pub(in crate::sys) fn socket_addr(bytes: &[u8]) -> io::Result<(libc::sockaddr_un, libc::socklen_t)> {
        let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
        // This is safe to assume because a `libc::sockaddr_un` filled with `0`
        // bytes is properly initialized.
        //
        // `0` is a valid value for `sockaddr_un::sun_family`; it is
        // `libc::AF_UNSPEC`.
        //
        // `[0; 108]` is a valid value for `sockaddr_un::sun_path`; it begins an
        // abstract path.
        let mut sockaddr = unsafe { sockaddr.assume_init() };
        sockaddr.sun_family = libc::AF_UNIX as libc::sa_family_t;
        match (bytes.first(), bytes.len().cmp(&sockaddr.sun_path.len())) {
            // Abstract paths don't need a null terminator
            (Some(&0), Ordering::Greater) => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "path must be no longer than libc::sockaddr_un.sun_path",
                ));
            }
            (_, Ordering::Greater) | (_, Ordering::Equal) => {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "path must be shorter than libc::sockaddr_un.sun_path",
                ));
            }
            _ => {}
        }
        for (dst, src) in sockaddr.sun_path.iter_mut().zip(bytes.iter()) {
            *dst = *src as libc::c_char;
        }
        let offset = path_offset(&sockaddr);
        let mut socklen = offset + bytes.len();
        match bytes.first() {
            // The struct has already been zeroed so the null byte for pathname
            // addresses is already there.
            Some(&0) | None => {}
            Some(_) => socklen += 1,
        }
        Ok((sockaddr, socklen as libc::socklen_t))
    }
    /// Creates a connected pair of Unix sockets of the given type, setting
    /// `NONBLOCK`/`CLOEXEC` atomically where the platform allows.
    fn pair<T>(flags: libc::c_int) -> io::Result<(T, T)>
    where T: FromRawFd,
    {
        #[cfg(not(any(
            target_os = "aix",
            target_os = "ios",
            target_os = "macos",
            target_os = "tvos",
            target_os = "watchos",
            target_os = "espidf",
            target_os = "vita",
        )))]
        let flags = flags | libc::SOCK_NONBLOCK | libc::SOCK_CLOEXEC;
        let mut fds = [-1; 2];
        syscall!(socketpair(libc::AF_UNIX, flags, 0, fds.as_mut_ptr()))?;
        let pair = unsafe { (T::from_raw_fd(fds[0]), T::from_raw_fd(fds[1])) };
        // Darwin (and others) doesn't have SOCK_NONBLOCK or SOCK_CLOEXEC.
        //
        // In order to set those flags, additional `fcntl` sys calls must be
        // performed. If a `fnctl` fails after the sockets have been created,
        // the file descriptors will leak. Creating `pair` above ensures that if
        // there is an error, the file descriptors are closed.
        #[cfg(any(
            target_os = "aix",
            target_os = "ios",
            target_os = "macos",
            target_os = "tvos",
            target_os = "watchos",
            target_os = "espidf",
            target_os = "vita",
        ))]
        {
            syscall!(fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK))?;
            #[cfg(not(any(target_os = "espidf", target_os = "vita")))]
            syscall!(fcntl(fds[0], libc::F_SETFD, libc::FD_CLOEXEC))?;
            syscall!(fcntl(fds[1], libc::F_SETFL, libc::O_NONBLOCK))?;
            #[cfg(not(any(target_os = "espidf", target_os = "vita")))]
            syscall!(fcntl(fds[1], libc::F_SETFD, libc::FD_CLOEXEC))?;
        }
        Ok(pair)
    }
    // The following functions can't simply be replaced with a call to
    // `net::UnixDatagram` because of our `SocketAddr` type.
    fn local_addr(socket: RawFd) -> io::Result<SocketAddr> {
        SocketAddr::new(|sockaddr, socklen| syscall!(getsockname(socket, sockaddr, socklen)))
    }
    fn peer_addr(socket: RawFd) -> io::Result<SocketAddr> {
        SocketAddr::new(|sockaddr, socklen| syscall!(getpeername(socket, sockaddr, socklen)))
    }
    #[cfg(test)]
    mod tests {
        use super::{path_offset, socket_addr};
        use std::os::unix::ffi::OsStrExt;
        use std::path::Path;
        use std::str;
        #[test]
        fn pathname_address() {
            const PATH: &str = "./foo/bar.txt";
            const PATH_LEN: usize = 13;
            // Pathname addresses do have a null terminator, so `socklen` is
            // expected to be `PATH_LEN` + `offset` + 1.
            let path = Path::new(PATH);
            let (sockaddr, actual) = socket_addr(path.as_os_str().as_bytes()).unwrap();
            let offset = path_offset(&sockaddr);
            let expected = PATH_LEN + offset + 1;
            assert_eq!(expected as libc::socklen_t, actual)
        }
        #[test]
        fn abstract_address() {
            const PATH: &[u8] = &[0, 116, 111, 107, 105, 111];
            const PATH_LEN: usize = 6;
            // Abstract addresses do not have a null terminator, so `socklen` is
            // expected to be `PATH_LEN` + `offset`.
            let (sockaddr, actual) = socket_addr(PATH).unwrap();
            let offset = path_offset(&sockaddr);
            let expected = PATH_LEN + offset;
            assert_eq!(expected as libc::socklen_t, actual)
        }
    }
}

View file

@ -0,0 +1,138 @@
use super::path_offset;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::{ascii, fmt};
/// An address associated with a `mio` specific Unix socket.
///
/// This is implemented instead of imported from [`net::SocketAddr`] because
/// there is no way to create a [`net::SocketAddr`]. One must be returned by
/// [`accept`], so this is returned instead.
///
/// [`net::SocketAddr`]: std::os::unix::net::SocketAddr
/// [`accept`]: #method.accept
pub struct SocketAddr {
    // Raw address as returned by the kernel.
    sockaddr: libc::sockaddr_un,
    // Number of meaningful bytes in `sockaddr` (offset of `sun_path` plus
    // path length).
    socklen: libc::socklen_t,
}
/// Display helper that ASCII-escapes a byte slice (for abstract addresses).
struct AsciiEscaped<'a>(&'a [u8]);
/// The three possible kinds of Unix socket address.
enum AddressKind<'a> {
    Unnamed,
    Pathname(&'a Path),
    Abstract(&'a [u8]),
}
impl SocketAddr {
    /// Classifies this address as unnamed, pathname, or abstract, based on
    /// `socklen` and the first byte of `sun_path`.
    fn address(&self) -> AddressKind<'_> {
        let offset = path_offset(&self.sockaddr);
        // Don't underflow in `len` below.
        if (self.socklen as usize) < offset {
            return AddressKind::Unnamed;
        }
        let len = self.socklen as usize - offset;
        let path = unsafe { &*(&self.sockaddr.sun_path as *const [libc::c_char] as *const [u8]) };
        // macOS seems to return a len of 16 and a zeroed sun_path for unnamed addresses
        if len == 0
            || (cfg!(not(any(target_os = "linux", target_os = "android")))
                && self.sockaddr.sun_path[0] == 0)
        {
            AddressKind::Unnamed
        } else if self.sockaddr.sun_path[0] == 0 {
            // Leading NUL marks an abstract address (Linux/Android); skip it.
            AddressKind::Abstract(&path[1..len])
        } else {
            // Pathname addresses carry a trailing NUL terminator; drop it.
            AddressKind::Pathname(OsStr::from_bytes(&path[..len - 1]).as_ref())
        }
    }
}
cfg_os_poll! {
    use std::{io, mem};
    impl SocketAddr {
        /// Builds a `SocketAddr` by letting `f` fill in a zeroed
        /// `sockaddr_un`/`socklen` pair (e.g. via `getsockname(2)`).
        pub(crate) fn new<F>(f: F) -> io::Result<SocketAddr>
        where
            F: FnOnce(*mut libc::sockaddr, &mut libc::socklen_t) -> io::Result<libc::c_int>,
        {
            let mut sockaddr = {
                // Zeroed `sockaddr_un` is a valid (unnamed) address, so
                // `assume_init` is sound here.
                let sockaddr = mem::MaybeUninit::<libc::sockaddr_un>::zeroed();
                unsafe { sockaddr.assume_init() }
            };
            let raw_sockaddr = &mut sockaddr as *mut libc::sockaddr_un as *mut libc::sockaddr;
            let mut socklen = mem::size_of_val(&sockaddr) as libc::socklen_t;
            f(raw_sockaddr, &mut socklen)?;
            Ok(SocketAddr::from_parts(sockaddr, socklen))
        }
        /// Constructs a `SocketAddr` from an already-filled address and length.
        pub(crate) fn from_parts(sockaddr: libc::sockaddr_un, socklen: libc::socklen_t) -> SocketAddr {
            SocketAddr { sockaddr, socklen }
        }
        /// Borrows the raw `sockaddr_un`.
        pub(crate) fn raw_sockaddr(&self) -> &libc::sockaddr_un {
            &self.sockaddr
        }
        /// Borrows the raw `socklen`.
        pub(crate) fn raw_socklen(&self) -> &libc::socklen_t {
            &self.socklen
        }
        /// Returns `true` if the address is unnamed.
        ///
        /// Documentation reflected in [`SocketAddr`]
        ///
        /// [`SocketAddr`]: std::os::unix::net::SocketAddr
        pub fn is_unnamed(&self) -> bool {
            matches!(self.address(), AddressKind::Unnamed)
        }
        /// Returns the contents of this address if it is a `pathname` address.
        ///
        /// Documentation reflected in [`SocketAddr`]
        ///
        /// [`SocketAddr`]: std::os::unix::net::SocketAddr
        pub fn as_pathname(&self) -> Option<&Path> {
            if let AddressKind::Pathname(path) = self.address() {
                Some(path)
            } else {
                None
            }
        }
        /// Returns the contents of this address if it is an abstract namespace
        /// without the leading null byte.
        // Link to std::os::unix::net::SocketAddr pending
        // https://github.com/rust-lang/rust/issues/85410.
        pub fn as_abstract_namespace(&self) -> Option<&[u8]> {
            if let AddressKind::Abstract(path) = self.address() {
                Some(path)
            } else {
                None
            }
        }
    }
}
impl fmt::Debug for SocketAddr {
    /// Formats the address according to its kind, labelling pathname and
    /// abstract addresses.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let kind = self.address();
        match kind {
            AddressKind::Pathname(path) => write!(fmt, "{:?} (pathname)", path),
            AddressKind::Abstract(name) => write!(fmt, "{} (abstract)", AsciiEscaped(name)),
            AddressKind::Unnamed => write!(fmt, "(unnamed)"),
        }
    }
}
impl<'a> fmt::Display for AsciiEscaped<'a> {
    /// Writes the bytes as a double-quoted string, escaping non-printable
    /// bytes with `ascii::escape_default`.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.write_str("\"")?;
        for &byte in self.0 {
            for escaped in ascii::escape_default(byte) {
                write!(fmt, "{}", escaped as char)?;
            }
        }
        fmt.write_str("\"")
    }
}

View file

@ -0,0 +1,43 @@
use super::{socket_addr, SocketAddr};
use crate::sys::unix::net::new_socket;
use std::io;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::os::unix::net;
use std::path::Path;
/// Starts a non-blocking connect to the Unix socket at `path`.
pub(crate) fn connect(path: &Path) -> io::Result<net::UnixStream> {
    let (sockaddr, socklen) = socket_addr(path.as_os_str().as_bytes())?;
    let address = SocketAddr::from_parts(sockaddr, socklen);
    connect_addr(&address)
}
/// Starts a connect of a new non-blocking stream socket to `address`.
///
/// `EINPROGRESS` is not treated as an error: for a non-blocking socket it
/// means the connect is still in flight and the caller should wait for a
/// writable event.
pub(crate) fn connect_addr(address: &SocketAddr) -> io::Result<net::UnixStream> {
    let fd = new_socket(libc::AF_UNIX, libc::SOCK_STREAM)?;
    // Wrap the fd immediately so it is closed if `connect` fails below.
    let socket = unsafe { net::UnixStream::from_raw_fd(fd) };
    let sockaddr = address.raw_sockaddr() as *const libc::sockaddr_un as *const libc::sockaddr;
    match syscall!(connect(fd, sockaddr, *address.raw_socklen())) {
        Ok(_) => {}
        Err(ref err) if err.raw_os_error() == Some(libc::EINPROGRESS) => {}
        Err(e) => return Err(e),
    }
    Ok(socket)
}
/// Creates an unnamed pair of connected stream sockets.
pub(crate) fn pair() -> io::Result<(net::UnixStream, net::UnixStream)> {
    super::pair(libc::SOCK_STREAM)
}
/// Returns the local address of `socket`.
pub(crate) fn local_addr(socket: &net::UnixStream) -> io::Result<SocketAddr> {
    super::local_addr(socket.as_raw_fd())
}
/// Returns the peer address of `socket`.
pub(crate) fn peer_addr(socket: &net::UnixStream) -> io::Result<SocketAddr> {
    super::peer_addr(socket.as_raw_fd())
}

View file

@ -0,0 +1,339 @@
#[cfg(all(
    not(mio_unsupported_force_poll_poll),
    not(all(
        not(mio_unsupported_force_waker_pipe),
        any(
            target_os = "freebsd",
            target_os = "ios",
            target_os = "macos",
            target_os = "tvos",
            target_os = "watchos",
        )
    )),
    not(any(target_os = "solaris", target_os = "vita")),
))]
mod fdbased {
    //! Waker backed by a file descriptor (an eventfd or a pipe, selected by
    //! the `cfg` attributes below) registered with the selector for
    //! readable events.
    #[cfg(all(
        not(mio_unsupported_force_waker_pipe),
        any(target_os = "linux", target_os = "android"),
    ))]
    use crate::sys::unix::waker::eventfd::WakerInternal;
    #[cfg(any(
        mio_unsupported_force_waker_pipe,
        target_os = "aix",
        target_os = "dragonfly",
        target_os = "illumos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "redox",
    ))]
    use crate::sys::unix::waker::pipe::WakerInternal;
    use crate::sys::Selector;
    use crate::{Interest, Token};
    use std::io;
    use std::os::unix::io::AsRawFd;
    #[derive(Debug)]
    pub struct Waker {
        waker: WakerInternal,
    }
    impl Waker {
        /// Creates a new `Waker`, registering its fd with `selector` for
        /// readable events under `token`.
        pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
            let waker = WakerInternal::new()?;
            selector.register(waker.as_raw_fd(), token, Interest::READABLE)?;
            Ok(Waker { waker })
        }
        /// Triggers the waker by delegating to the underlying fd-based
        /// implementation.
        pub fn wake(&self) -> io::Result<()> {
            self.waker.wake()
        }
    }
}
#[cfg(all(
not(mio_unsupported_force_poll_poll),
not(all(
not(mio_unsupported_force_waker_pipe),
any(
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
)
)),
not(any(target_os = "solaris", target_os = "vita")),
))]
pub use self::fdbased::Waker;
#[cfg(all(
    not(mio_unsupported_force_waker_pipe),
    any(target_os = "linux", target_os = "android", target_os = "espidf")
))]
mod eventfd {
    use std::fs::File;
    use std::io::{self, Read, Write};
    use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
    /// Waker backed by `eventfd`.
    ///
    /// `eventfd` is effectively an 64 bit counter. All writes must be of 8
    /// bytes (64 bits) and are converted (native endian) into an 64 bit
    /// unsigned integer and added to the count. Reads must also be 8 bytes and
    /// reset the count to 0, returning the count.
    #[derive(Debug)]
    pub struct WakerInternal {
        fd: File,
    }
    impl WakerInternal {
        /// Creates a new eventfd-based waker (close-on-exec and
        /// non-blocking, except where the flags are unsupported).
        pub fn new() -> io::Result<WakerInternal> {
            #[cfg(not(target_os = "espidf"))]
            let flags = libc::EFD_CLOEXEC | libc::EFD_NONBLOCK;
            // ESP-IDF is EFD_NONBLOCK by default and errors if you try to pass this flag.
            #[cfg(target_os = "espidf")]
            let flags = 0;
            let fd = syscall!(eventfd(0, flags))?;
            // Wrap in a `File` so the fd is closed when the waker is dropped.
            let file = unsafe { File::from_raw_fd(fd) };
            Ok(WakerInternal { fd: file })
        }
        /// Adds 1 to the eventfd counter, making the fd readable.
        pub fn wake(&self) -> io::Result<()> {
            let buf: [u8; 8] = 1u64.to_ne_bytes();
            match (&self.fd).write(&buf) {
                Ok(_) => Ok(()),
                Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
                    // Writing only blocks if the counter is going to overflow.
                    // So we'll reset the counter to 0 and wake it again.
                    self.reset()?;
                    self.wake()
                }
                Err(err) => Err(err),
            }
        }
        // Only compiled for the poll(2)-based backend, which must drain the
        // counter after observing a wake-up.
        #[cfg(mio_unsupported_force_poll_poll)]
        pub fn ack_and_reset(&self) {
            let _ = self.reset();
        }
        /// Reset the eventfd object, only need to call this if `wake` fails.
        fn reset(&self) -> io::Result<()> {
            let mut buf: [u8; 8] = 0u64.to_ne_bytes();
            match (&self.fd).read(&mut buf) {
                Ok(_) => Ok(()),
                // If the `Waker` hasn't been awoken yet this will return a
                // `WouldBlock` error which we can safely ignore.
                Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Ok(()),
                Err(err) => Err(err),
            }
        }
    }
    impl AsRawFd for WakerInternal {
        fn as_raw_fd(&self) -> RawFd {
            self.fd.as_raw_fd()
        }
    }
}
#[cfg(all(
mio_unsupported_force_poll_poll,
not(mio_unsupported_force_waker_pipe),
any(target_os = "linux", target_os = "android", target_os = "espidf")
))]
pub(crate) use self::eventfd::WakerInternal;
#[cfg(all(
    not(mio_unsupported_force_waker_pipe),
    any(
        target_os = "freebsd",
        target_os = "ios",
        target_os = "macos",
        target_os = "tvos",
        target_os = "watchos",
    )
))]
mod kqueue {
    use crate::sys::Selector;
    use crate::Token;
    use std::io;
    /// Waker backed by kqueue user space notifications (`EVFILT_USER`).
    ///
    /// The implementation is fairly simple, first the kqueue must be setup to
    /// receive waker events this done by calling `Selector.setup_waker`. Next
    /// we need access to kqueue, thus we need to duplicate the file descriptor.
    /// Now waking is as simple as adding an event to the kqueue.
    #[derive(Debug)]
    pub struct Waker {
        selector: Selector,
        token: Token,
    }
    impl Waker {
        /// Duplicates `selector`'s kqueue and prepares it to receive waker
        /// events for `token`.
        pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
            let selector = selector.try_clone()?;
            selector.setup_waker(token)?;
            Ok(Waker { selector, token })
        }
        /// Posts a user event to the kqueue, waking any `poll` call.
        pub fn wake(&self) -> io::Result<()> {
            self.selector.wake(self.token)
        }
    }
}
#[cfg(all(
not(mio_unsupported_force_waker_pipe),
any(
target_os = "freebsd",
target_os = "ios",
target_os = "macos",
target_os = "tvos",
target_os = "watchos",
)
))]
pub use self::kqueue::Waker;
#[cfg(any(
    mio_unsupported_force_waker_pipe,
    target_os = "aix",
    target_os = "dragonfly",
    target_os = "illumos",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "redox",
    target_os = "solaris",
    target_os = "vita",
))]
mod pipe {
    use crate::sys::unix::pipe;
    use std::fs::File;
    use std::io::{self, Read, Write};
    use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
    /// Waker backed by a unix pipe.
    ///
    /// Waker controls both the sending and receiving ends and empties the pipe
    /// if writing to it (waking) fails.
    #[derive(Debug)]
    pub struct WakerInternal {
        sender: File,
        receiver: File,
    }
    impl WakerInternal {
        /// Creates a new pipe-backed waker, taking ownership of both ends.
        pub fn new() -> io::Result<WakerInternal> {
            let [receiver, sender] = pipe::new_raw()?;
            // Wrap in `File`s so both fds are closed on drop.
            let sender = unsafe { File::from_raw_fd(sender) };
            let receiver = unsafe { File::from_raw_fd(receiver) };
            Ok(WakerInternal { sender, receiver })
        }
        /// Writes a single byte to the pipe, making the read end readable.
        pub fn wake(&self) -> io::Result<()> {
            // The epoll emulation on some illumos systems currently requires
            // the pipe buffer to be completely empty for an edge-triggered
            // wakeup on the pipe read side.
            #[cfg(target_os = "illumos")]
            self.empty();
            match (&self.sender).write(&[1]) {
                Ok(_) => Ok(()),
                Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => {
                    // The reading end is full so we'll empty the buffer and try
                    // again.
                    self.empty();
                    self.wake()
                }
                // Retry if the write was interrupted by a signal.
                Err(ref err) if err.kind() == io::ErrorKind::Interrupted => self.wake(),
                Err(err) => Err(err),
            }
        }
        // Only compiled for the poll(2)-based backends, which must drain the
        // pipe after observing a wake-up.
        #[cfg(any(
            mio_unsupported_force_poll_poll,
            target_os = "solaris",
            target_os = "vita"
        ))]
        pub fn ack_and_reset(&self) {
            self.empty();
        }
        /// Empty the pipe's buffer, only need to call this if `wake` fails.
        /// This ignores any errors.
        fn empty(&self) {
            let mut buf = [0; 4096];
            loop {
                match (&self.receiver).read(&mut buf) {
                    Ok(n) if n > 0 => continue,
                    // EOF, empty pipe (`WouldBlock`) or any error: stop.
                    _ => return,
                }
            }
        }
    }
    impl AsRawFd for WakerInternal {
        fn as_raw_fd(&self) -> RawFd {
            self.receiver.as_raw_fd()
        }
    }
}
#[cfg(any(
all(
mio_unsupported_force_poll_poll,
any(
mio_unsupported_force_waker_pipe,
target_os = "aix",
target_os = "dragonfly",
target_os = "illumos",
target_os = "netbsd",
target_os = "openbsd",
target_os = "redox",
)
),
target_os = "solaris",
target_os = "vita",
))]
pub(crate) use self::pipe::WakerInternal;
#[cfg(any(
    mio_unsupported_force_poll_poll,
    target_os = "solaris",
    target_os = "vita"
))]
mod poll {
    use crate::sys::Selector;
    use crate::Token;
    use std::io;
    /// Waker for the poll(2)-based selector; waking is delegated to the
    /// selector itself.
    #[derive(Debug)]
    pub struct Waker {
        selector: Selector,
        token: Token,
    }
    impl Waker {
        /// Creates a waker holding a clone of `selector`.
        pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
            Ok(Waker {
                selector: selector.try_clone()?,
                token,
            })
        }
        /// Asks the selector to wake up with `token`.
        pub fn wake(&self) -> io::Result<()> {
            self.selector.wake(self.token)
        }
    }
}
#[cfg(any(
mio_unsupported_force_poll_poll,
target_os = "solaris",
target_os = "vita"
))]
pub use self::poll::Waker;

View file

@ -0,0 +1,370 @@
//! # Notes
//!
//! The current implementation is somewhat limited. The `Waker` is not
//! implemented, as at the time of writing there is no way to wake up a
//! thread blocked in `poll_oneoff`.
//!
//! Furthermore the (re/de)register functions also don't work while concurrently
//! polling as both registering and polling requires a lock on the
//! `subscriptions`.
//!
//! Finally `Selector::try_clone`, required by `Registry::try_clone`, doesn't
//! work. However this could be implemented by use of an `Arc`.
//!
//! In summary, this only (barely) works using a single thread.
use std::cmp::min;
use std::io;
#[cfg(all(feature = "net", debug_assertions))]
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
#[cfg(feature = "net")]
use crate::{Interest, Token};
cfg_net! {
    pub(crate) mod tcp {
        use std::io;
        use std::net::{self, SocketAddr};
        /// Accepts a new connection and switches the accepted stream to
        /// non-blocking mode, as required for use with `Poll`.
        pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
            let (stream, addr) = listener.accept()?;
            stream.set_nonblocking(true)?;
            Ok((stream, addr))
        }
    }
}
/// Unique id for use as `SelectorId`.
#[cfg(all(debug_assertions, feature = "net"))]
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);
/// Selector for WASI, implemented on top of `poll_oneoff`.
///
/// The subscription list lives behind an `Arc` so `try_clone` can hand out
/// handles sharing the same registrations.
pub(crate) struct Selector {
    #[cfg(all(debug_assertions, feature = "net"))]
    id: usize,
    /// Subscriptions (reads events) we're interested in.
    subscriptions: Arc<Mutex<Vec<wasi::Subscription>>>,
}
impl Selector {
    /// Creates a new selector with an empty subscription list.
    pub(crate) fn new() -> io::Result<Selector> {
        Ok(Selector {
            #[cfg(all(debug_assertions, feature = "net"))]
            id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
            subscriptions: Arc::new(Mutex::new(Vec::new())),
        })
    }
    /// Returns this selector's unique id (debug builds with `net` only).
    #[cfg(all(debug_assertions, feature = "net"))]
    pub(crate) fn id(&self) -> usize {
        self.id
    }
    /// Polls for events, blocking up to `timeout` (forever if `None`).
    ///
    /// Holds the `subscriptions` lock for the duration of `poll_oneoff`, so
    /// concurrent (re/de)registration blocks (see module docs).
    pub(crate) fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
        events.clear();
        let mut subscriptions = self.subscriptions.lock().unwrap();
        // If we want to use a timeout with `wasi::poll_oneoff()` we need to
        // temporarily add a clock subscription to the list.
        if let Some(timeout) = timeout {
            subscriptions.push(timeout_subscription(timeout));
        }
        // `poll_oneoff` needs the same number of events as subscriptions.
        let length = subscriptions.len();
        events.reserve(length);
        debug_assert!(events.capacity() >= length);
        #[cfg(debug_assertions)]
        if length == 0 {
            warn!(
                "calling mio::Poll::poll with empty subscriptions, this likely not what you want"
            );
        }
        let res = unsafe { wasi::poll_oneoff(subscriptions.as_ptr(), events.as_mut_ptr(), length) };
        // Remove the timeout subscription we possibly added above.
        if timeout.is_some() {
            let timeout_sub = subscriptions.pop();
            debug_assert_eq!(
                timeout_sub.unwrap().u.tag,
                wasi::EVENTTYPE_CLOCK.raw(),
                "failed to remove timeout subscription"
            );
        }
        drop(subscriptions); // Unlock.
        match res {
            Ok(n_events) => {
                // Safety: `poll_oneoff` initialises the `events` for us.
                unsafe { events.set_len(n_events) };
                // Remove the timeout event.
                if timeout.is_some() {
                    if let Some(index) = events.iter().position(is_timeout_event) {
                        events.swap_remove(index);
                    }
                }
                check_errors(&events)
            }
            Err(err) => Err(io_err(err)),
        }
    }
    /// Returns a selector sharing the same subscription list.
    pub(crate) fn try_clone(&self) -> io::Result<Selector> {
        Ok(Selector {
            #[cfg(all(debug_assertions, feature = "net"))]
            id: self.id,
            subscriptions: self.subscriptions.clone(),
        })
    }
    /// Adds read/write subscriptions for `fd`, keyed by `token`.
    #[cfg(feature = "net")]
    pub(crate) fn register(
        &self,
        fd: wasi::Fd,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        let mut subscriptions = self.subscriptions.lock().unwrap();
        if interests.is_writable() {
            let subscription = wasi::Subscription {
                userdata: token.0 as wasi::Userdata,
                u: wasi::SubscriptionU {
                    tag: wasi::EVENTTYPE_FD_WRITE.raw(),
                    u: wasi::SubscriptionUU {
                        fd_write: wasi::SubscriptionFdReadwrite {
                            file_descriptor: fd,
                        },
                    },
                },
            };
            subscriptions.push(subscription);
        }
        if interests.is_readable() {
            let subscription = wasi::Subscription {
                userdata: token.0 as wasi::Userdata,
                u: wasi::SubscriptionU {
                    tag: wasi::EVENTTYPE_FD_READ.raw(),
                    u: wasi::SubscriptionUU {
                        fd_read: wasi::SubscriptionFdReadwrite {
                            file_descriptor: fd,
                        },
                    },
                },
            };
            subscriptions.push(subscription);
        }
        Ok(())
    }
    /// Replaces `fd`'s subscriptions with ones for the new `interests`.
    #[cfg(feature = "net")]
    pub(crate) fn reregister(
        &self,
        fd: wasi::Fd,
        token: Token,
        interests: Interest,
    ) -> io::Result<()> {
        self.deregister(fd)
            .and_then(|()| self.register(fd, token, interests))
    }
    /// Removes all read/write subscriptions for `fd`; errors with
    /// `NotFound` if none existed.
    #[cfg(feature = "net")]
    pub(crate) fn deregister(&self, fd: wasi::Fd) -> io::Result<()> {
        let mut subscriptions = self.subscriptions.lock().unwrap();
        let predicate = |subscription: &wasi::Subscription| {
            // Safety: `subscription.u.tag` defines the type of the union in
            // `subscription.u.u`.
            match subscription.u.tag {
                t if t == wasi::EVENTTYPE_FD_WRITE.raw() => unsafe {
                    subscription.u.u.fd_write.file_descriptor == fd
                },
                t if t == wasi::EVENTTYPE_FD_READ.raw() => unsafe {
                    subscription.u.u.fd_read.file_descriptor == fd
                },
                _ => false,
            }
        };
        let mut ret = Err(io::ErrorKind::NotFound.into());
        while let Some(index) = subscriptions.iter().position(predicate) {
            subscriptions.swap_remove(index);
            ret = Ok(())
        }
        ret
    }
}
/// Token used to add a timeout subscription, also used in removing it again.
// `Userdata::MAX` (i.e. `u64::MAX`) is used so it cannot collide with a real
// `Token`, whose `usize` value is at most `u64::MAX` but practically smaller.
const TIMEOUT_TOKEN: wasi::Userdata = wasi::Userdata::MAX;
/// Returns a `wasi::Subscription` for `timeout`.
///
/// The subscription carries `TIMEOUT_TOKEN` as userdata so the matching
/// event can be recognised (and removed) in `Selector::select`.
fn timeout_subscription(timeout: Duration) -> wasi::Subscription {
    wasi::Subscription {
        userdata: TIMEOUT_TOKEN,
        u: wasi::SubscriptionU {
            tag: wasi::EVENTTYPE_CLOCK.raw(),
            u: wasi::SubscriptionUU {
                clock: wasi::SubscriptionClock {
                    id: wasi::CLOCKID_MONOTONIC,
                    // Timestamp is in nanoseconds.
                    timeout: min(wasi::Timestamp::MAX as u128, timeout.as_nanos())
                        as wasi::Timestamp,
                    // Give the implementation another millisecond to coalesce
                    // events.
                    precision: Duration::from_millis(1).as_nanos() as wasi::Timestamp,
                    // Zero means the `timeout` is considered relative to the
                    // current time.
                    flags: 0,
                },
            },
        },
    }
}
/// Returns `true` if `event` is the clock event added by
/// `timeout_subscription`.
fn is_timeout_event(event: &wasi::Event) -> bool {
    event.type_ == wasi::EVENTTYPE_CLOCK && event.userdata == TIMEOUT_TOKEN
}
/// Check all events for possible errors, it returns the first error found.
fn check_errors(events: &[Event]) -> io::Result<()> {
for event in events {
if event.error != wasi::ERRNO_SUCCESS {
return Err(io_err(event.error));
}
}
Ok(())
}
/// Convert `wasi::Errno` into an `io::Error`.
fn io_err(errno: wasi::Errno) -> io::Error {
    // TODO: check if this is valid.
    io::Error::from_raw_os_error(errno.raw() as i32)
}
// Events are handed to users as raw `wasi::Event`s; no wrapper type is
// needed on this platform.
pub(crate) type Events = Vec<Event>;
pub(crate) type Event = wasi::Event;
pub(crate) mod event {
    //! Accessors interpreting raw `wasi::Event`s for the cross-platform
    //! `Event` API.
    use std::fmt;
    use crate::sys::Event;
    use crate::Token;
    /// Returns the token the event's subscription was registered with.
    pub(crate) fn token(event: &Event) -> Token {
        Token(event.userdata as usize)
    }
    pub(crate) fn is_readable(event: &Event) -> bool {
        event.type_ == wasi::EVENTTYPE_FD_READ
    }
    pub(crate) fn is_writable(event: &Event) -> bool {
        event.type_ == wasi::EVENTTYPE_FD_WRITE
    }
    pub(crate) fn is_error(_: &Event) -> bool {
        // Not supported? It could be that `wasi::Event.error` could be used for
        // this, but the docs say `error that occurred while processing the
        // subscription request`, so it's checked in `Select::select` already.
        false
    }
    pub(crate) fn is_read_closed(event: &Event) -> bool {
        event.type_ == wasi::EVENTTYPE_FD_READ
            // Safety: checked the type of the union above.
            && (event.fd_readwrite.flags & wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP) != 0
    }
    pub(crate) fn is_write_closed(event: &Event) -> bool {
        event.type_ == wasi::EVENTTYPE_FD_WRITE
            // Safety: checked the type of the union above.
            && (event.fd_readwrite.flags & wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP) != 0
    }
    pub(crate) fn is_priority(_: &Event) -> bool {
        // Not supported.
        false
    }
    pub(crate) fn is_aio(_: &Event) -> bool {
        // Not supported.
        false
    }
    pub(crate) fn is_lio(_: &Event) -> bool {
        // Not supported.
        false
    }
    /// Writes a verbose, human-readable representation of `event`, used by
    /// the `Debug` implementation of the public `Event` type.
    pub(crate) fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
        debug_detail!(
            TypeDetails(wasi::Eventtype),
            PartialEq::eq,
            wasi::EVENTTYPE_CLOCK,
            wasi::EVENTTYPE_FD_READ,
            wasi::EVENTTYPE_FD_WRITE,
        );
        #[allow(clippy::trivially_copy_pass_by_ref)]
        fn check_flag(got: &wasi::Eventrwflags, want: &wasi::Eventrwflags) -> bool {
            (got & want) != 0
        }
        debug_detail!(
            EventrwflagsDetails(wasi::Eventrwflags),
            check_flag,
            wasi::EVENTRWFLAGS_FD_READWRITE_HANGUP,
        );
        struct EventFdReadwriteDetails(wasi::EventFdReadwrite);
        impl fmt::Debug for EventFdReadwriteDetails {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_struct("EventFdReadwrite")
                    .field("nbytes", &self.0.nbytes)
                    .field("flags", &self.0.flags)
                    .finish()
            }
        }
        f.debug_struct("Event")
            .field("userdata", &event.userdata)
            .field("error", &event.error)
            .field("type", &TypeDetails(event.type_))
            .field("fd_readwrite", &EventFdReadwriteDetails(event.fd_readwrite))
            .finish()
    }
}
cfg_os_poll! {
    cfg_io_source! {
        // Zero-sized stand-in: WASI needs no per-source state, but the type
        // presumably mirrors stateful implementations on other platforms —
        // see the sibling `sys` modules.
        pub(crate) struct IoSourceState;
        impl IoSourceState {
            pub(crate) fn new() -> IoSourceState {
                IoSourceState
            }
            /// Runs `f(io)` directly; no bookkeeping is required here.
            pub(crate) fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
            where
                F: FnOnce(&T) -> io::Result<R>,
            {
                // We don't hold state, so we can just call the function and
                // return.
                f(io)
            }
        }
    }
}

View file

@ -0,0 +1,255 @@
use std::ffi::c_void;
use std::fmt;
use std::fs::File;
use std::io;
use std::mem::size_of;
use std::os::windows::io::AsRawHandle;
use windows_sys::Win32::Foundation::{
RtlNtStatusToDosError, HANDLE, NTSTATUS, STATUS_NOT_FOUND, STATUS_PENDING, STATUS_SUCCESS,
};
use windows_sys::Win32::System::WindowsProgramming::{
NtDeviceIoControlFile, IO_STATUS_BLOCK, IO_STATUS_BLOCK_0,
};
/// Device I/O control code for the AFD driver's poll operation.
const IOCTL_AFD_POLL: u32 = 0x00012024;
#[link(name = "ntdll")]
extern "system" {
    /// See <https://processhacker.sourceforge.io/doc/ntioapi_8h.html#a0d4d550cad4d62d75b76961e25f6550c>
    ///
    /// This is an undocumented API and as such not part of <https://github.com/microsoft/win32metadata>
    /// from which `windows-sys` is generated, and also unlikely to be added, so
    /// we manually declare it here
    fn NtCancelIoFileEx(
        FileHandle: HANDLE,
        IoRequestToCancel: *mut IO_STATUS_BLOCK,
        IoStatusBlock: *mut IO_STATUS_BLOCK,
    ) -> NTSTATUS;
}
/// Winsock2 AFD driver instance.
///
/// All operations are unsafe due to IO_STATUS_BLOCK parameter are being used by Afd driver during STATUS_PENDING before I/O Completion Port returns its result.
#[derive(Debug)]
pub struct Afd {
    // Handle to `\Device\Afd\Mio`, wrapped in `File` so it closes on drop.
    fd: File,
}
#[repr(C)]
#[derive(Debug)]
pub struct AfdPollHandleInfo {
    // Handle the poll request applies to.
    pub handle: HANDLE,
    // `POLL_*` bit flags for this handle.
    pub events: u32,
    // NT status for this handle; NOTE(review): appears to be filled in on
    // completion since the same buffer is used for input and output in
    // `Afd::poll` — confirm.
    pub status: NTSTATUS,
}
// SAFETY: NOTE(review): assumed sound because the struct only carries a raw
// handle value and plain integers — confirm the handle has no thread
// affinity.
unsafe impl Send for AfdPollHandleInfo {}
#[repr(C)]
pub struct AfdPollInfo {
    // Poll timeout; NOTE(review): units follow the NT kernel convention for
    // this driver call — confirm before relying on them.
    pub timeout: i64,
    // Can have only value 1.
    pub number_of_handles: u32,
    pub exclusive: u32,
    pub handles: [AfdPollHandleInfo; 1],
}
impl fmt::Debug for AfdPollInfo {
    // The field contents are not useful for diagnostics, so only the type
    // name is printed (same output as `debug_struct("AfdPollInfo").finish()`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("AfdPollInfo")
    }
}
impl Afd {
    /// Poll `Afd` instance with `AfdPollInfo`.
    ///
    /// # Unsafety
    ///
    /// This function is unsafe due to memory of `IO_STATUS_BLOCK` still being used by `Afd` instance while `Ok(false)` (`STATUS_PENDING`).
    /// `iosb` needs to be untouched after the call while operation is in effective at ALL TIME except for `cancel` method.
    /// So be careful not to `poll` twice while polling.
    /// User should deallocate there overlapped value when error to prevent memory leak.
    pub unsafe fn poll(
        &self,
        info: &mut AfdPollInfo,
        iosb: *mut IO_STATUS_BLOCK,
        overlapped: *mut c_void,
    ) -> io::Result<bool> {
        let info_ptr = info as *mut _ as *mut c_void;
        // Mark the operation as in-flight before handing the block to the
        // driver; `cancel` relies on seeing `STATUS_PENDING` here.
        (*iosb).Anonymous.Status = STATUS_PENDING;
        // `info` is used as both the input and the output buffer.
        let status = NtDeviceIoControlFile(
            self.fd.as_raw_handle() as HANDLE,
            0,
            None,
            overlapped,
            iosb,
            IOCTL_AFD_POLL,
            info_ptr,
            size_of::<AfdPollInfo>() as u32,
            info_ptr,
            size_of::<AfdPollInfo>() as u32,
        );
        match status {
            // `Ok(true)`: completed synchronously; `Ok(false)`: pending, the
            // result will arrive via the completion port.
            STATUS_SUCCESS => Ok(true),
            STATUS_PENDING => Ok(false),
            _ => Err(io::Error::from_raw_os_error(
                RtlNtStatusToDosError(status) as i32
            )),
        }
    }
    /// Cancel previous polled request of `Afd`.
    ///
    /// iosb needs to be used by `poll` first for valid `cancel`.
    ///
    /// # Unsafety
    ///
    /// This function is unsafe due to memory of `IO_STATUS_BLOCK` still being used by `Afd` instance while `Ok(false)` (`STATUS_PENDING`).
    /// Use it only with request is still being polled so that you have valid `IO_STATUS_BLOCK` to use.
    /// User should NOT deallocate there overlapped value after the `cancel` to prevent double free.
    pub unsafe fn cancel(&self, iosb: *mut IO_STATUS_BLOCK) -> io::Result<()> {
        // Nothing to cancel if the operation already completed.
        if (*iosb).Anonymous.Status != STATUS_PENDING {
            return Ok(());
        }
        let mut cancel_iosb = IO_STATUS_BLOCK {
            Anonymous: IO_STATUS_BLOCK_0 { Status: 0 },
            Information: 0,
        };
        let status = NtCancelIoFileEx(self.fd.as_raw_handle() as HANDLE, iosb, &mut cancel_iosb);
        // `STATUS_NOT_FOUND` means the request already completed; treat it
        // as success.
        if status == STATUS_SUCCESS || status == STATUS_NOT_FOUND {
            return Ok(());
        }
        Err(io::Error::from_raw_os_error(
            RtlNtStatusToDosError(status) as i32
        ))
    }
}
cfg_io_source! {
    use std::mem::zeroed;
    use std::os::windows::io::{FromRawHandle, RawHandle};
    use std::ptr::null_mut;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use super::iocp::CompletionPort;
    use windows_sys::Win32::{
        Foundation::{UNICODE_STRING, INVALID_HANDLE_VALUE},
        System::WindowsProgramming::{
            OBJECT_ATTRIBUTES, FILE_SKIP_SET_EVENT_ON_HANDLE,
        },
        Storage::FileSystem::{FILE_OPEN, NtCreateFile, SetFileCompletionNotificationModes, SYNCHRONIZE, FILE_SHARE_READ, FILE_SHARE_WRITE},
    };
    // Object attributes naming the AFD helper device for `NtCreateFile`.
    const AFD_HELPER_ATTRIBUTES: OBJECT_ATTRIBUTES = OBJECT_ATTRIBUTES {
        Length: size_of::<OBJECT_ATTRIBUTES>() as u32,
        RootDirectory: 0,
        ObjectName: &AFD_OBJ_NAME as *const _ as *mut _,
        Attributes: 0,
        SecurityDescriptor: null_mut(),
        SecurityQualityOfService: null_mut(),
    };
    // Counted UTF-16 string referencing `AFD_HELPER_NAME` (lengths are in
    // bytes, per `UNICODE_STRING` convention).
    const AFD_OBJ_NAME: UNICODE_STRING = UNICODE_STRING {
        Length: (AFD_HELPER_NAME.len() * size_of::<u16>()) as u16,
        MaximumLength: (AFD_HELPER_NAME.len() * size_of::<u16>()) as u16,
        Buffer: AFD_HELPER_NAME.as_ptr() as *mut _,
    };
    // `\Device\Afd\Mio` as UTF-16 code units.
    const AFD_HELPER_NAME: &[u16] = &[
        '\\' as _,
        'D' as _,
        'e' as _,
        'v' as _,
        'i' as _,
        'c' as _,
        'e' as _,
        '\\' as _,
        'A' as _,
        'f' as _,
        'd' as _,
        '\\' as _,
        'M' as _,
        'i' as _,
        'o' as _
    ];
    // Source of completion-port tokens for AFD handles (see comment in
    // `Afd::new` about even/odd numbering).
    static NEXT_TOKEN: AtomicUsize = AtomicUsize::new(0);
    impl AfdPollInfo {
        /// Returns an all-zero poll info, to be filled in before `poll`.
        pub fn zeroed() -> AfdPollInfo {
            unsafe { zeroed() }
        }
    }
    impl Afd {
        /// Create new Afd instance.
        pub(crate) fn new(cp: &CompletionPort) -> io::Result<Afd> {
            let mut afd_helper_handle: HANDLE = INVALID_HANDLE_VALUE;
            let mut iosb = IO_STATUS_BLOCK {
                Anonymous: IO_STATUS_BLOCK_0 { Status: 0 },
                Information: 0,
            };
            unsafe {
                // Open the `\Device\Afd\Mio` helper device.
                let status = NtCreateFile(
                    &mut afd_helper_handle as *mut _,
                    SYNCHRONIZE,
                    &AFD_HELPER_ATTRIBUTES as *const _ as *mut _,
                    &mut iosb,
                    null_mut(),
                    0,
                    FILE_SHARE_READ | FILE_SHARE_WRITE,
                    FILE_OPEN,
                    0,
                    null_mut(),
                    0,
                );
                if status != STATUS_SUCCESS {
                    let raw_err = io::Error::from_raw_os_error(
                        RtlNtStatusToDosError(status) as i32
                    );
                    let msg = format!("Failed to open \\Device\\Afd\\Mio: {}", raw_err);
                    return Err(io::Error::new(raw_err.kind(), msg));
                }
                let fd = File::from_raw_handle(afd_helper_handle as RawHandle);
                // Increment by 2 to reserve space for other types of handles.
                // Non-AFD types (currently only NamedPipe), use odd numbered
                // tokens. This allows the selector to differentiate between them
                // and dispatch events accordingly.
                let token = NEXT_TOKEN.fetch_add(2, Ordering::Relaxed) + 2;
                let afd = Afd { fd };
                cp.add_handle(token, &afd.fd)?;
                match SetFileCompletionNotificationModes(
                    afd_helper_handle,
                    FILE_SKIP_SET_EVENT_ON_HANDLE as u8 // This is just 2, so fits in u8
                ) {
                    0 => Err(io::Error::last_os_error()),
                    _ => Ok(afd),
                }
            }
        }
    }
}
// Bit flags for AFD poll events; requested via `AfdPollHandleInfo::events`
// and combined into the readiness masks in `event.rs`.
pub const POLL_RECEIVE: u32 = 0b0_0000_0001;
pub const POLL_RECEIVE_EXPEDITED: u32 = 0b0_0000_0010;
pub const POLL_SEND: u32 = 0b0_0000_0100;
pub const POLL_DISCONNECT: u32 = 0b0_0000_1000;
pub const POLL_ABORT: u32 = 0b0_0001_0000;
pub const POLL_LOCAL_CLOSE: u32 = 0b0_0010_0000;
// Not used as it indicated in each event where a connection is connected, not
// just the first time a connection is established.
// Also see https://github.com/piscisaureus/wepoll/commit/8b7b340610f88af3d83f40fb728e7b850b090ece.
pub const POLL_CONNECT: u32 = 0b0_0100_0000;
pub const POLL_ACCEPT: u32 = 0b0_1000_0000;
pub const POLL_CONNECT_FAIL: u32 = 0b1_0000_0000;
// All flags mio knows how to interpret (note: `POLL_CONNECT` is excluded,
// see above).
pub const KNOWN_EVENTS: u32 = POLL_RECEIVE
    | POLL_RECEIVE_EXPEDITED
    | POLL_SEND
    | POLL_DISCONNECT
    | POLL_ABORT
    | POLL_LOCAL_CLOSE
    | POLL_ACCEPT
    | POLL_CONNECT_FAIL;

View file

@ -0,0 +1,169 @@
use std::fmt;
use super::afd;
use super::iocp::CompletionStatus;
use crate::Token;
/// Readiness event for the Windows selector.
///
/// `flags` holds AFD poll flags (`afd::POLL_*`); `data` carries the user's
/// token value.
#[derive(Clone)]
pub struct Event {
    pub flags: u32,
    pub data: u64,
}
/// Returns the token the event was registered with.
pub fn token(event: &Event) -> Token {
    Token(event.data as usize)
}
impl Event {
    /// Creates an event with no readiness flags set, carrying `token`.
    pub(super) fn new(token: Token) -> Event {
        Event {
            flags: 0,
            data: usize::from(token) as u64,
        }
    }
    pub(super) fn set_readable(&mut self) {
        self.flags |= afd::POLL_RECEIVE
    }
    #[cfg(feature = "os-ext")]
    pub(super) fn set_writable(&mut self) {
        self.flags |= afd::POLL_SEND;
    }
    /// Builds an `Event` from a raw IOCP completion status; the AFD flags
    /// travel in `bytes_transferred` and the token in the completion key.
    pub(super) fn from_completion_status(status: &CompletionStatus) -> Event {
        Event {
            flags: status.bytes_transferred(),
            data: status.token() as u64,
        }
    }
    /// Converts back into a completion status without an overlapped pointer.
    pub(super) fn to_completion_status(&self) -> CompletionStatus {
        CompletionStatus::new(self.flags, self.data as usize, std::ptr::null_mut())
    }
    /// Converts into a completion status carrying `overlapped`.
    #[cfg(feature = "os-ext")]
    pub(super) fn to_completion_status_with_overlapped(
        &self,
        overlapped: *mut super::Overlapped,
    ) -> CompletionStatus {
        CompletionStatus::new(self.flags, self.data as usize, overlapped)
    }
}
// AFD flag combinations mapping driver events onto the cross-platform
// readiness notions used by the `is_*` predicates below.
pub(crate) const READABLE_FLAGS: u32 = afd::POLL_RECEIVE
    | afd::POLL_DISCONNECT
    | afd::POLL_ACCEPT
    | afd::POLL_ABORT
    | afd::POLL_CONNECT_FAIL;
pub(crate) const WRITABLE_FLAGS: u32 = afd::POLL_SEND | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
pub(crate) const ERROR_FLAGS: u32 = afd::POLL_CONNECT_FAIL;
pub(crate) const READ_CLOSED_FLAGS: u32 =
    afd::POLL_DISCONNECT | afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
pub(crate) const WRITE_CLOSED_FLAGS: u32 = afd::POLL_ABORT | afd::POLL_CONNECT_FAIL;
/// Returns `true` if any readable-readiness flag is set.
pub fn is_readable(event: &Event) -> bool {
    event.flags & READABLE_FLAGS != 0
}
/// Returns `true` if any writable-readiness flag is set.
pub fn is_writable(event: &Event) -> bool {
    event.flags & WRITABLE_FLAGS != 0
}
/// Returns `true` if the event signals a failed connect.
pub fn is_error(event: &Event) -> bool {
    event.flags & ERROR_FLAGS != 0
}
/// Returns `true` if the read half is closed (disconnect/abort/failure).
pub fn is_read_closed(event: &Event) -> bool {
    event.flags & READ_CLOSED_FLAGS != 0
}
/// Returns `true` if the write half is closed (abort/failure).
pub fn is_write_closed(event: &Event) -> bool {
    event.flags & WRITE_CLOSED_FLAGS != 0
}
/// Returns `true` if out-of-band (expedited) data is available.
pub fn is_priority(event: &Event) -> bool {
    event.flags & afd::POLL_RECEIVE_EXPEDITED != 0
}
pub fn is_aio(_: &Event) -> bool {
    // Not supported.
    false
}
pub fn is_lio(_: &Event) -> bool {
    // Not supported.
    false
}
/// Writes a verbose representation of `event`, spelling out each AFD flag
/// by name (used by the public `Event`'s `Debug` implementation).
pub fn debug_details(f: &mut fmt::Formatter<'_>, event: &Event) -> fmt::Result {
    #[allow(clippy::trivially_copy_pass_by_ref)]
    fn check_flags(got: &u32, want: &u32) -> bool {
        (got & want) != 0
    }
    // `debug_detail!` generates a wrapper type whose `Debug` impl lists the
    // matching flag names.
    debug_detail!(
        FlagsDetails(u32),
        check_flags,
        afd::POLL_RECEIVE,
        afd::POLL_RECEIVE_EXPEDITED,
        afd::POLL_SEND,
        afd::POLL_DISCONNECT,
        afd::POLL_ABORT,
        afd::POLL_LOCAL_CLOSE,
        afd::POLL_CONNECT,
        afd::POLL_ACCEPT,
        afd::POLL_CONNECT_FAIL,
    );
    f.debug_struct("event")
        .field("flags", &FlagsDetails(event.flags))
        .field("data", &event.data)
        .finish()
}
/// Buffers used when polling the IOCP-based selector.
pub struct Events {
    /// Raw I/O event completions are filled in here by the call to `get_many`
    /// on the completion port above. These are then processed to run callbacks
    /// which figure out what to do after the event is done.
    pub statuses: Box<[CompletionStatus]>,
    /// Literal events returned by `get` to the upwards `EventLoop`. This file
    /// doesn't really modify this (except for the waker), instead almost all
    /// events are filled in by the `ReadinessQueue` from the `poll` module.
    pub events: Vec<Event>,
}
impl Events {
    /// Allocates room for `cap` raw completion statuses and `cap` user
    /// facing events.
    pub fn with_capacity(cap: usize) -> Events {
        // Note that it's possible for the output `events` to grow beyond the
        // capacity as it can also include deferred events, but that's certainly
        // not the end of the world!
        let statuses = vec![CompletionStatus::zero(); cap].into_boxed_slice();
        Events {
            statuses,
            events: Vec::with_capacity(cap),
        }
    }
    /// Returns `true` when no user-facing events are queued.
    pub fn is_empty(&self) -> bool {
        self.events.is_empty()
    }
    pub fn capacity(&self) -> usize {
        self.events.capacity()
    }
    pub fn len(&self) -> usize {
        self.events.len()
    }
    pub fn get(&self, idx: usize) -> Option<&Event> {
        self.events.get(idx)
    }
    /// Clears the queued events and zeroes every raw completion status.
    pub fn clear(&mut self) {
        self.events.clear();
        for status in &mut self.statuses[..] {
            *status = CompletionStatus::zero();
        }
    }
}

View file

@ -0,0 +1,30 @@
use std::os::windows::io::RawHandle;
use windows_sys::Win32::Foundation::{CloseHandle, HANDLE};
/// Wrapper around a Windows HANDLE so that we close it upon drop in all scenarios
#[derive(Debug)]
pub struct Handle(HANDLE);
impl Handle {
    /// Takes ownership of `handle`; it will be closed when this wrapper is
    /// dropped.
    #[inline]
    pub fn new(handle: HANDLE) -> Self {
        Self(handle)
    }
    /// Returns the raw handle without giving up ownership.
    pub fn raw(&self) -> HANDLE {
        self.0
    }
    /// Consumes the wrapper and returns the raw handle; the caller becomes
    /// responsible for closing it.
    pub fn into_raw(self) -> RawHandle {
        // `ManuallyDrop` suppresses `Drop`, so the handle is NOT closed here.
        let this = std::mem::ManuallyDrop::new(self);
        this.0 as RawHandle
    }
}
impl Drop for Handle {
    fn drop(&mut self) {
        // Close the handle; any error from `CloseHandle` is ignored since
        // there is no way to report it from `drop`.
        unsafe { CloseHandle(self.0) };
    }
}

View file

@ -0,0 +1,40 @@
use std::fmt;
use std::ops::{Deref, DerefMut};
use windows_sys::Win32::System::WindowsProgramming::IO_STATUS_BLOCK;
/// Owned wrapper around the raw `IO_STATUS_BLOCK` passed to NT system calls.
pub struct IoStatusBlock(IO_STATUS_BLOCK);
cfg_io_source! {
    use windows_sys::Win32::System::WindowsProgramming::{IO_STATUS_BLOCK_0};
    impl IoStatusBlock {
        /// Returns a zero-initialised block (status 0, no information),
        /// ready to be handed to an NT call.
        pub fn zeroed() -> Self {
            Self(IO_STATUS_BLOCK {
                Anonymous: IO_STATUS_BLOCK_0 { Status: 0 },
                Information: 0,
            })
        }
    }
}
// SAFETY: NOTE(review): `IO_STATUS_BLOCK` holds only a status code and an
// information word (plain data), so sending it between threads is assumed
// sound — confirm no thread-affine state is attached.
unsafe impl Send for IoStatusBlock {}
// Deref to the raw block so it can be passed directly to NT calls.
impl Deref for IoStatusBlock {
    type Target = IO_STATUS_BLOCK;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for IoStatusBlock {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl fmt::Debug for IoStatusBlock {
    // Only the type name is printed (identical output to
    // `debug_struct("IoStatusBlock").finish()` with no fields).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("IoStatusBlock")
    }
}

View file

@ -0,0 +1,273 @@
//! Bindings to IOCP, I/O Completion Ports
use super::{Handle, Overlapped};
use std::cmp;
use std::fmt;
use std::io;
use std::mem;
use std::os::windows::io::*;
use std::time::Duration;
use windows_sys::Win32::Foundation::{HANDLE, INVALID_HANDLE_VALUE};
use windows_sys::Win32::System::IO::{
CreateIoCompletionPort, GetQueuedCompletionStatusEx, PostQueuedCompletionStatus, OVERLAPPED,
OVERLAPPED_ENTRY,
};
/// A handle to an Windows I/O Completion Port.
#[derive(Debug)]
pub(crate) struct CompletionPort {
    // Owned handle; closed when the port is dropped.
    handle: Handle,
}

/// A status message received from an I/O completion port.
///
/// These statuses can be created via the `new` or `zero` constructors and then
/// provided to a completion port, or they are read out of a completion port.
/// The fields of each status are read through its accessor methods.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct CompletionStatus(OVERLAPPED_ENTRY);

impl fmt::Debug for CompletionStatus {
    // Opaque on purpose; the entry's fields are exposed via accessors instead.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "CompletionStatus(OVERLAPPED_ENTRY)")
    }
}

// SAFETY: `OVERLAPPED_ENTRY` holds plain integers plus a raw pointer, and this
// wrapper never dereferences the pointer itself. NOTE(review): soundness of
// sharing relies on users of `lpOverlapped` synchronizing its dereference —
// confirm at the call sites.
unsafe impl Send for CompletionStatus {}
unsafe impl Sync for CompletionStatus {}
impl CompletionPort {
    /// Creates a new I/O completion port with the specified concurrency value.
    ///
    /// The number of threads given corresponds to the level of concurrency
    /// allowed for threads associated with this port. Consult the Windows
    /// documentation for more information about this value.
    pub fn new(threads: u32) -> io::Result<CompletionPort> {
        // Passing `INVALID_HANDLE_VALUE` plus a null (0) existing port asks
        // the kernel to create a brand new completion port.
        let ret = unsafe { CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, threads) };
        if ret == 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(CompletionPort {
                handle: Handle::new(ret),
            })
        }
    }

    /// Associates a new `HANDLE` to this I/O completion port.
    ///
    /// This function will associate the given handle to this port with the
    /// given `token` to be returned in status messages whenever it receives a
    /// notification.
    ///
    /// Any object which is convertible to a `HANDLE` via the `AsRawHandle`
    /// trait can be provided to this function, such as `std::fs::File` and
    /// friends.
    #[cfg(any(feature = "net", feature = "os-ext"))]
    pub fn add_handle<T: AsRawHandle + ?Sized>(&self, token: usize, t: &T) -> io::Result<()> {
        let ret = unsafe {
            CreateIoCompletionPort(t.as_raw_handle() as HANDLE, self.handle.raw(), token, 0)
        };
        if ret == 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(())
        }
    }

    /// Dequeues multiple completion statuses from this I/O completion port.
    ///
    /// A buffer of "zero" statuses is provided (the contents are not read) and
    /// then on success this function will return a sub-slice of statuses which
    /// represent those which were dequeued from this port. This function does
    /// not wait to fill up the entire list of statuses provided.
    ///
    /// A timeout may be specified for this operation; `None` waits
    /// indefinitely.
    pub fn get_many<'a>(
        &self,
        list: &'a mut [CompletionStatus],
        timeout: Option<Duration>,
    ) -> io::Result<&'a mut [CompletionStatus]> {
        // `CompletionStatus` is `repr(transparent)` over `OVERLAPPED_ENTRY`,
        // so the buffer can be reinterpreted for the syscall.
        debug_assert_eq!(
            mem::size_of::<CompletionStatus>(),
            mem::size_of::<OVERLAPPED_ENTRY>()
        );
        let mut removed = 0;
        let timeout = duration_millis(timeout);
        // The syscall takes a `u32` count; clamp rather than truncate.
        let len = cmp::min(list.len(), u32::MAX as usize) as u32;
        let ret = unsafe {
            GetQueuedCompletionStatusEx(
                self.handle.raw(),
                list.as_ptr() as *mut _,
                len,
                &mut removed,
                timeout,
                0,
            )
        };
        if ret == 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(&mut list[..removed as usize])
        }
    }

    /// Posts a new completion status onto this I/O completion port.
    ///
    /// This function will post the given status, with custom parameters, to the
    /// port. Threads blocked in `get_many` will eventually receive this status.
    pub fn post(&self, status: CompletionStatus) -> io::Result<()> {
        let ret = unsafe {
            PostQueuedCompletionStatus(
                self.handle.raw(),
                status.0.dwNumberOfBytesTransferred,
                status.0.lpCompletionKey,
                status.0.lpOverlapped,
            )
        };
        if ret == 0 {
            Err(io::Error::last_os_error())
        } else {
            Ok(())
        }
    }
}
impl AsRawHandle for CompletionPort {
    // Borrow the raw port handle; ownership stays with `self`.
    fn as_raw_handle(&self) -> RawHandle {
        self.handle.raw() as RawHandle
    }
}

impl FromRawHandle for CompletionPort {
    // Takes ownership: the handle will be closed when the port is dropped.
    unsafe fn from_raw_handle(handle: RawHandle) -> CompletionPort {
        CompletionPort {
            handle: Handle::new(handle as HANDLE),
        }
    }
}

impl IntoRawHandle for CompletionPort {
    // Gives up ownership; the caller becomes responsible for closing it.
    fn into_raw_handle(self) -> RawHandle {
        self.handle.into_raw()
    }
}
impl CompletionStatus {
    /// Creates a new completion status with the provided parameters.
    ///
    /// This function is useful when creating a status to send to a port with
    /// the `post` method. The parameters are opaquely passed through and not
    /// interpreted by the system at all.
    pub(crate) fn new(bytes: u32, token: usize, overlapped: *mut Overlapped) -> Self {
        CompletionStatus(OVERLAPPED_ENTRY {
            dwNumberOfBytesTransferred: bytes,
            lpCompletionKey: token,
            lpOverlapped: overlapped as *mut _,
            Internal: 0,
        })
    }

    /// Creates a new borrowed completion status from the borrowed
    /// `OVERLAPPED_ENTRY` argument provided.
    ///
    /// This method will wrap the `OVERLAPPED_ENTRY` in a `CompletionStatus`,
    /// returning the wrapped structure.
    #[cfg(feature = "os-ext")]
    pub fn from_entry(entry: &OVERLAPPED_ENTRY) -> &Self {
        // Safety: CompletionStatus is repr(transparent) w/ OVERLAPPED_ENTRY, so
        // a reference to one is guaranteed to be layout compatible with the
        // reference to another.
        unsafe { &*(entry as *const _ as *const _) }
    }

    /// Creates a new "zero" completion status.
    ///
    /// This function is useful when creating a stack buffer or vector of
    /// completion statuses to be passed to the `get_many` function.
    pub fn zero() -> Self {
        Self::new(0, 0, std::ptr::null_mut())
    }

    /// Returns the number of bytes that were transferred for the I/O operation
    /// associated with this completion status.
    pub fn bytes_transferred(&self) -> u32 {
        self.0.dwNumberOfBytesTransferred
    }

    /// Returns the completion key value associated with the file handle whose
    /// I/O operation has completed.
    ///
    /// A completion key is a per-handle key that is specified when it is added
    /// to an I/O completion port via `add_handle` or `add_socket`.
    pub fn token(&self) -> usize {
        self.0.lpCompletionKey as usize
    }

    /// Returns a pointer to the `Overlapped` structure that was specified when
    /// the I/O operation was started.
    pub fn overlapped(&self) -> *mut OVERLAPPED {
        self.0.lpOverlapped
    }

    /// Returns a pointer to the internal `OVERLAPPED_ENTRY` object.
    pub fn entry(&self) -> &OVERLAPPED_ENTRY {
        &self.0
    }
}
/// Converts an optional timeout into the millisecond value IOCP expects.
///
/// `None` maps to `u32::MAX` (INFINITE). Sub-millisecond durations are rounded
/// *up* so a small-but-nonzero timeout never turns into a zero (busy) timeout;
/// an explicit zero stays zero.
#[inline]
fn duration_millis(dur: Option<Duration>) -> u32 {
    match dur {
        None => u32::MAX,
        Some(dur) => {
            // Round up by adding just under one millisecond before truncating;
            // on overflow fall back to the original (already huge) duration.
            let rounded = dur.checked_add(Duration::from_nanos(999_999)).unwrap_or(dur);
            cmp::min(rounded.as_millis(), u32::MAX as u128) as u32
        }
    }
}
#[cfg(test)]
mod tests {
    use super::{CompletionPort, CompletionStatus};

    #[test]
    fn is_send_sync() {
        // Compile-time check that the port can be shared across threads.
        fn is_send_sync<T: Send + Sync>() {}
        is_send_sync::<CompletionPort>();
    }

    #[test]
    fn get_many() {
        // NOTE: exercises real IOCP syscalls, so this only runs on Windows.
        let c = CompletionPort::new(1).unwrap();

        c.post(CompletionStatus::new(1, 2, 3 as *mut _)).unwrap();
        c.post(CompletionStatus::new(4, 5, 6 as *mut _)).unwrap();

        let mut s = vec![CompletionStatus::zero(); 4];
        {
            // Both posted statuses should be dequeued, in posting order.
            let s = c.get_many(&mut s, None).unwrap();
            assert_eq!(s.len(), 2);
            assert_eq!(s[0].bytes_transferred(), 1);
            assert_eq!(s[0].token(), 2);
            assert_eq!(s[0].overlapped(), 3 as *mut _);
            assert_eq!(s[1].bytes_transferred(), 4);
            assert_eq!(s[1].token(), 5);
            assert_eq!(s[1].overlapped(), 6 as *mut _);
        }
        // Entries past the returned sub-slice stay zeroed.
        assert_eq!(s[2].bytes_transferred(), 0);
        assert_eq!(s[2].token(), 0);
        assert_eq!(s[2].overlapped(), 0 as *mut _);
    }
}

View file

@ -0,0 +1,154 @@
mod afd;
pub mod event;
pub use event::{Event, Events};
mod handle;
use handle::Handle;
mod io_status_block;
mod iocp;
mod overlapped;
use overlapped::Overlapped;
mod selector;
pub use selector::Selector;
// Macros must be defined before the modules that use them
cfg_net! {
    /// Helper macro to execute a system call that returns an `io::Result`.
    //
    // Macro must be defined before any modules that uses them.
    //
    // `$err_test($res, $err_value)` decides whether the raw return value is an
    // error; if so the last OS error is returned.
    macro_rules! syscall {
        ($fn: ident ( $($arg: expr),* $(,)* ), $err_test: path, $err_value: expr) => {{
            let res = unsafe { $fn($($arg, )*) };
            if $err_test(&res, &$err_value) {
                Err(io::Error::last_os_error())
            } else {
                Ok(res)
            }
        }};
    }

    mod net;

    pub(crate) mod tcp;
    pub(crate) mod udp;

    pub use selector::{SelectorInner, SockState};
}
cfg_os_ext! {
pub(crate) mod named_pipe;
}
mod waker;
pub(crate) use waker::Waker;
cfg_io_source! {
    use std::io;
    use std::os::windows::io::RawSocket;
    use std::pin::Pin;
    use std::sync::{Arc, Mutex};

    use crate::{Interest, Registry, Token};

    /// Per-registration bookkeeping: the selector a source is registered with
    /// and the token/interests it was registered under.
    struct InternalState {
        selector: Arc<SelectorInner>,
        token: Token,
        interests: Interest,
        sock_state: Pin<Arc<Mutex<SockState>>>,
    }

    impl Drop for InternalState {
        fn drop(&mut self) {
            // Mark the socket state for deletion; the selector cleans it up
            // once any pending kernel operation has completed.
            let mut sock_state = self.sock_state.lock().unwrap();
            sock_state.mark_delete();
        }
    }

    /// Selector-related state attached to every I/O source wrapper.
    pub struct IoSourceState {
        // This is `None` if the socket has not yet been registered.
        //
        // We box the internal state to not increase the size on the stack as the
        // type might move around a lot.
        inner: Option<Box<InternalState>>,
    }

    impl IoSourceState {
        pub fn new() -> IoSourceState {
            IoSourceState { inner: None }
        }

        /// Runs `f(io)` and, when it fails with `WouldBlock`, re-arms the
        /// registration so another readiness event will be delivered.
        pub fn do_io<T, F, R>(&self, f: F, io: &T) -> io::Result<R>
        where
            F: FnOnce(&T) -> io::Result<R>,
        {
            let result = f(io);
            if let Err(ref e) = result {
                if e.kind() == io::ErrorKind::WouldBlock {
                    // Unregistered sources have nothing to re-arm.
                    self.inner.as_ref().map_or(Ok(()), |state| {
                        state
                            .selector
                            .reregister(state.sock_state.clone(), state.token, state.interests)
                    })?;
                }
            }
            result
        }

        /// First-time registration; fails with `AlreadyExists` if the source
        /// was registered before.
        pub fn register(
            &mut self,
            registry: &Registry,
            token: Token,
            interests: Interest,
            socket: RawSocket,
        ) -> io::Result<()> {
            if self.inner.is_some() {
                Err(io::ErrorKind::AlreadyExists.into())
            } else {
                registry
                    .selector()
                    .register(socket, token, interests)
                    .map(|state| {
                        self.inner = Some(Box::new(state));
                    })
            }
        }

        /// Updates token/interests of an existing registration.
        pub fn reregister(
            &mut self,
            registry: &Registry,
            token: Token,
            interests: Interest,
        ) -> io::Result<()> {
            match self.inner.as_mut() {
                Some(state) => {
                    registry
                        .selector()
                        .reregister(state.sock_state.clone(), token, interests)
                        .map(|()| {
                            // Record the new values only after a successful
                            // reregistration.
                            state.token = token;
                            state.interests = interests;
                        })
                }
                None => Err(io::ErrorKind::NotFound.into()),
            }
        }

        /// Removes the registration, if any.
        pub fn deregister(&mut self) -> io::Result<()> {
            match self.inner.as_mut() {
                Some(state) => {
                    {
                        let mut sock_state = state.sock_state.lock().unwrap();
                        sock_state.mark_delete();
                    }
                    self.inner = None;
                    Ok(())
                }
                None => Err(io::ErrorKind::NotFound.into()),
            }
        }
    }
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,111 @@
use std::io;
use std::mem;
use std::net::SocketAddr;
use std::sync::Once;
use windows_sys::Win32::Networking::WinSock::{
closesocket, ioctlsocket, socket, AF_INET, AF_INET6, FIONBIO, IN6_ADDR, IN6_ADDR_0,
INVALID_SOCKET, IN_ADDR, IN_ADDR_0, SOCKADDR, SOCKADDR_IN, SOCKADDR_IN6, SOCKADDR_IN6_0,
SOCKET,
};
/// Initialise the network stack for Windows.
fn init() {
    static INIT: Once = Once::new();
    INIT.call_once(|| {
        // Let standard library call `WSAStartup` for us, we can't do it
        // ourselves because otherwise using any type in `std::net` would panic
        // when it tries to call `WSAStartup` a second time.
        drop(std::net::UdpSocket::bind("127.0.0.1:0"));
    });
}

/// Create a new non-blocking socket matching `addr`'s address family.
pub(crate) fn new_ip_socket(addr: SocketAddr, socket_type: i32) -> io::Result<SOCKET> {
    let domain = match addr {
        SocketAddr::V4(..) => AF_INET,
        SocketAddr::V6(..) => AF_INET6,
    };

    new_socket(domain.into(), socket_type)
}

/// Create a new non-blocking socket of the given family and type.
///
/// On failure the half-created socket is closed before returning the error.
pub(crate) fn new_socket(domain: u32, socket_type: i32) -> io::Result<SOCKET> {
    init();

    let socket = syscall!(
        socket(domain as i32, socket_type, 0),
        PartialEq::eq,
        INVALID_SOCKET
    )?;

    // FIONBIO with a non-zero argument puts the socket in non-blocking mode.
    if let Err(err) = syscall!(ioctlsocket(socket, FIONBIO, &mut 1), PartialEq::ne, 0) {
        let _ = unsafe { closesocket(socket) };
        return Err(err);
    }

    Ok(socket as SOCKET)
}
/// A type with the same memory layout as `SOCKADDR`. Used in converting Rust level
/// SocketAddr* types into their system representation. The benefit of this specific
/// type over using `SOCKADDR_STORAGE` is that this type is exactly as large as it
/// needs to be and not a lot larger. And it can be initialized cleaner from Rust.
#[repr(C)]
pub(crate) union SocketAddrCRepr {
    v4: SOCKADDR_IN,
    v6: SOCKADDR_IN6,
}

impl SocketAddrCRepr {
    /// Returns the union viewed as a generic `SOCKADDR` pointer, as accepted
    /// by winsock calls such as `bind`/`connect`.
    pub(crate) fn as_ptr(&self) -> *const SOCKADDR {
        self as *const _ as *const SOCKADDR
    }
}
/// Converts a Rust `SocketAddr` into its C representation plus the length (in
/// bytes) winsock expects alongside it.
pub(crate) fn socket_addr(addr: &SocketAddr) -> (SocketAddrCRepr, i32) {
    match addr {
        SocketAddr::V4(ref addr) => {
            // `s_addr` is stored as BE on all machine and the array is in BE order.
            // So the native endian conversion method is used so that it's never swapped.
            let sin_addr = unsafe {
                let mut s_un = mem::zeroed::<IN_ADDR_0>();
                s_un.S_addr = u32::from_ne_bytes(addr.ip().octets());
                IN_ADDR { S_un: s_un }
            };

            let sockaddr_in = SOCKADDR_IN {
                sin_family: AF_INET as u16, // 1
                sin_port: addr.port().to_be(),
                sin_addr,
                sin_zero: [0; 8],
            };

            let sockaddr = SocketAddrCRepr { v4: sockaddr_in };
            (sockaddr, mem::size_of::<SOCKADDR_IN>() as i32)
        }
        SocketAddr::V6(ref addr) => {
            let sin6_addr = unsafe {
                let mut u = mem::zeroed::<IN6_ADDR_0>();
                u.Byte = addr.ip().octets();
                IN6_ADDR { u }
            };
            // The scope ID lives in an anonymous union on `SOCKADDR_IN6`.
            let u = unsafe {
                let mut u = mem::zeroed::<SOCKADDR_IN6_0>();
                u.sin6_scope_id = addr.scope_id();
                u
            };

            let sockaddr_in6 = SOCKADDR_IN6 {
                sin6_family: AF_INET6 as u16, // 23
                sin6_port: addr.port().to_be(),
                sin6_addr,
                sin6_flowinfo: addr.flowinfo(),
                Anonymous: u,
            };

            let sockaddr = SocketAddrCRepr { v6: sockaddr_in6 };
            (sockaddr, mem::size_of::<SOCKADDR_IN6>() as i32)
        }
    }
}

View file

@ -0,0 +1,35 @@
use crate::sys::windows::Event;
use std::cell::UnsafeCell;
use std::fmt;
use windows_sys::Win32::System::IO::{OVERLAPPED, OVERLAPPED_ENTRY};
/// An `OVERLAPPED` paired with the callback to invoke when it completes.
///
/// `repr(C)` with the `OVERLAPPED` first, so a pointer to this struct can be
/// handed to the kernel and later cast back from the completion entry (see the
/// named-pipe branch in the selector's `feed_events`).
#[repr(C)]
pub(crate) struct Overlapped {
    inner: UnsafeCell<OVERLAPPED>,
    pub(crate) callback: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>),
}

#[cfg(feature = "os-ext")]
impl Overlapped {
    /// Creates a zeroed `OVERLAPPED` that runs `cb` on completion.
    pub(crate) fn new(cb: fn(&OVERLAPPED_ENTRY, Option<&mut Vec<Event>>)) -> Overlapped {
        Overlapped {
            // SAFETY: `OVERLAPPED` is a plain C struct for which all-zeroes is
            // a valid initial state.
            inner: UnsafeCell::new(unsafe { std::mem::zeroed() }),
            callback: cb,
        }
    }

    pub(crate) fn as_ptr(&self) -> *const OVERLAPPED {
        self.inner.get()
    }
}

impl fmt::Debug for Overlapped {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Overlapped").finish()
    }
}

// SAFETY: the struct itself holds only the cell and a fn pointer.
// NOTE(review): the kernel mutates `inner` through the raw pointer during an
// operation; soundness relies on callers not accessing `inner` concurrently —
// confirm at the call sites.
unsafe impl Send for Overlapped {}
unsafe impl Sync for Overlapped {}

View file

@ -0,0 +1,741 @@
use super::afd::{self, Afd, AfdPollInfo};
use super::io_status_block::IoStatusBlock;
use super::Event;
use crate::sys::Events;
cfg_net! {
use crate::sys::event::{
ERROR_FLAGS, READABLE_FLAGS, READ_CLOSED_FLAGS, WRITABLE_FLAGS, WRITE_CLOSED_FLAGS,
};
use crate::Interest;
}
use super::iocp::{CompletionPort, CompletionStatus};
use std::collections::VecDeque;
use std::ffi::c_void;
use std::io;
use std::marker::PhantomPinned;
use std::os::windows::io::RawSocket;
use std::pin::Pin;
#[cfg(debug_assertions)]
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use windows_sys::Win32::Foundation::{
ERROR_INVALID_HANDLE, ERROR_IO_PENDING, HANDLE, STATUS_CANCELLED, WAIT_TIMEOUT,
};
use windows_sys::Win32::System::IO::OVERLAPPED;
/// Pool of AFD helper handles shared between registered sockets.
#[derive(Debug)]
struct AfdGroup {
    #[cfg_attr(not(feature = "net"), allow(dead_code))]
    cp: Arc<CompletionPort>,
    afd_group: Mutex<Vec<Arc<Afd>>>,
}

impl AfdGroup {
    pub fn new(cp: Arc<CompletionPort>) -> AfdGroup {
        AfdGroup {
            afd_group: Mutex::new(Vec::new()),
            cp,
        }
    }

    /// Drops pooled `Afd` handles that no socket references any more.
    pub fn release_unused_afd(&self) {
        let mut afd_group = self.afd_group.lock().unwrap();
        // A strong count of 1 means only the pool's own `Vec` still holds it.
        afd_group.retain(|g| Arc::strong_count(g) > 1);
    }
}
cfg_io_source! {
    /// Maximum number of sockets sharing one `Afd` handle before a new handle
    /// is allocated.
    const POLL_GROUP__MAX_GROUP_SIZE: usize = 32;

    impl AfdGroup {
        /// Returns an `Afd` handle with spare capacity, allocating a fresh one
        /// when the pool is empty or the most recent handle is full.
        pub fn acquire(&self) -> io::Result<Arc<Afd>> {
            let mut afd_group = self.afd_group.lock().unwrap();
            // The `Vec` itself holds one reference, hence `>` (not `>=`) when
            // comparing the strong count against the group size limit.
            let need_alloc = match afd_group.last() {
                None => true,
                Some(last) => Arc::strong_count(last) > POLL_GROUP__MAX_GROUP_SIZE,
            };
            if need_alloc {
                self._alloc_afd_group(&mut afd_group)?;
            }

            match afd_group.last() {
                Some(arc) => Ok(arc.clone()),
                None => unreachable!(
                    "Cannot acquire afd, {:#?}, afd_group: {:#?}",
                    self, afd_group
                ),
            }
        }

        /// Allocates a new `Afd` handle bound to our completion port and
        /// pushes it onto the pool.
        fn _alloc_afd_group(&self, afd_group: &mut Vec<Arc<Afd>>) -> io::Result<()> {
            let afd = Afd::new(&self.cp)?;
            let arc = Arc::new(afd);
            afd_group.push(arc);
            Ok(())
        }
    }
}
/// Lifecycle of the single outstanding AFD poll operation for a socket.
#[derive(Debug)]
enum SockPollStatus {
    /// No operation is currently submitted to the kernel.
    Idle,
    /// An AFD poll operation is in flight.
    Pending,
    /// The in-flight operation was cancelled; its completion is still awaited.
    Cancelled,
}

#[derive(Debug)]
pub struct SockState {
    // Status block the kernel writes the poll result into.
    iosb: IoStatusBlock,
    poll_info: AfdPollInfo,
    afd: Arc<Afd>,

    // The underlying "base" socket handed to the AFD driver (unwrapped from
    // any layered service providers; see `get_base_socket`).
    base_socket: RawSocket,

    // Events the user registered interest in.
    user_evts: u32,
    // Events covered by the currently pending poll operation.
    pending_evts: u32,

    // User token, reported back in delivered events.
    user_data: u64,

    poll_status: SockPollStatus,
    delete_pending: bool,

    // last raw os error
    error: Option<i32>,

    // Self-referential via the pointer leaked to the kernel; must not move.
    _pinned: PhantomPinned,
}
impl SockState {
    /// (Re)submits the AFD poll operation so it matches `user_evts`.
    ///
    /// `self_arc` must be the `Arc` this state lives in; a leaked strong
    /// reference to it is handed to the kernel for the operation's lifetime.
    fn update(&mut self, self_arc: &Pin<Arc<Mutex<SockState>>>) -> io::Result<()> {
        assert!(!self.delete_pending);

        // make sure to reset previous error before a new update
        self.error = None;

        if let SockPollStatus::Pending = self.poll_status {
            if (self.user_evts & afd::KNOWN_EVENTS & !self.pending_evts) == 0 {
                /* All the events the user is interested in are already being monitored by
                 * the pending poll operation. It might spuriously complete because of an
                 * event that we're no longer interested in; when that happens we'll submit
                 * a new poll operation with the updated event mask. */
            } else {
                /* A poll operation is already pending, but it's not monitoring for all the
                 * events that the user is interested in. Therefore, cancel the pending
                 * poll operation; when we receive its completion package, a new poll
                 * operation will be submitted with the correct event mask. */
                if let Err(e) = self.cancel() {
                    self.error = e.raw_os_error();
                    return Err(e);
                }
                return Ok(());
            }
        } else if let SockPollStatus::Cancelled = self.poll_status {
            /* The poll operation has already been cancelled, we're still waiting for
             * it to return. For now, there's nothing that needs to be done. */
        } else if let SockPollStatus::Idle = self.poll_status {
            /* No poll operation is pending; start one. */
            self.poll_info.exclusive = 0;
            self.poll_info.number_of_handles = 1;
            self.poll_info.timeout = i64::MAX;
            self.poll_info.handles[0].handle = self.base_socket as HANDLE;
            self.poll_info.handles[0].status = 0;
            self.poll_info.handles[0].events = self.user_evts | afd::POLL_LOCAL_CLOSE;

            // Increase the ref count as the memory will be used by the kernel.
            let overlapped_ptr = into_overlapped(self_arc.clone());

            let result = unsafe {
                self.afd
                    .poll(&mut self.poll_info, &mut *self.iosb, overlapped_ptr)
            };
            if let Err(e) = result {
                let code = e.raw_os_error().unwrap();
                if code == ERROR_IO_PENDING as i32 {
                    /* Overlapped poll operation in progress; this is expected. */
                } else {
                    // Since the operation failed it means the kernel won't be
                    // using the memory any more.
                    drop(from_overlapped(overlapped_ptr as *mut _));
                    if code == ERROR_INVALID_HANDLE as i32 {
                        /* Socket closed; it'll be dropped. */
                        self.mark_delete();
                        return Ok(());
                    } else {
                        self.error = e.raw_os_error();
                        return Err(e);
                    }
                }
            }

            self.poll_status = SockPollStatus::Pending;
            self.pending_evts = self.user_evts;
        } else {
            unreachable!("Invalid poll status during update, {:#?}", self)
        }

        Ok(())
    }

    /// Cancels the in-flight AFD poll; panics if none is pending.
    fn cancel(&mut self) -> io::Result<()> {
        match self.poll_status {
            SockPollStatus::Pending => {}
            _ => unreachable!("Invalid poll status during cancel, {:#?}", self),
        };
        unsafe {
            self.afd.cancel(&mut *self.iosb)?;
        }
        self.poll_status = SockPollStatus::Cancelled;
        self.pending_evts = 0;
        Ok(())
    }

    // This is the function called from the overlapped using as Arc<Mutex<SockState>>. Watch out for reference counting.
    fn feed_event(&mut self) -> Option<Event> {
        self.poll_status = SockPollStatus::Idle;
        self.pending_evts = 0;

        let mut afd_events = 0;
        // We use the status info in IO_STATUS_BLOCK to determine the socket poll status. It is unsafe to use a pointer of IO_STATUS_BLOCK.
        unsafe {
            if self.delete_pending {
                return None;
            } else if self.iosb.Anonymous.Status == STATUS_CANCELLED {
                /* The poll request was cancelled by CancelIoEx. */
            } else if self.iosb.Anonymous.Status < 0 {
                /* The overlapped request itself failed in an unexpected way. */
                afd_events = afd::POLL_CONNECT_FAIL;
            } else if self.poll_info.number_of_handles < 1 {
                /* This poll operation succeeded but didn't report any socket events. */
            } else if self.poll_info.handles[0].events & afd::POLL_LOCAL_CLOSE != 0 {
                /* The poll operation reported that the socket was closed. */
                self.mark_delete();
                return None;
            } else {
                afd_events = self.poll_info.handles[0].events;
            }
        }

        // Only report events the user actually asked for.
        afd_events &= self.user_evts;

        if afd_events == 0 {
            return None;
        }

        // In mio, we have to simulate Edge-triggered behavior to match API usage.
        // The strategy here is to intercept all read/write from user that could cause WouldBlock usage,
        // then reregister the socket to reset the interests.
        self.user_evts &= !afd_events;

        Some(Event {
            data: self.user_data,
            flags: afd_events,
        })
    }

    pub fn is_pending_deletion(&self) -> bool {
        self.delete_pending
    }

    /// Marks this state for deletion, cancelling any in-flight poll first.
    pub fn mark_delete(&mut self) {
        if !self.delete_pending {
            if let SockPollStatus::Pending = self.poll_status {
                // Best effort: a failed cancel is ignored here.
                drop(self.cancel());
            }

            self.delete_pending = true;
        }
    }

    fn has_error(&self) -> bool {
        self.error.is_some()
    }
}
cfg_io_source! {
    impl SockState {
        /// Creates a fresh, idle state for `raw_socket`, resolving its
        /// LSP-free base socket up front.
        fn new(raw_socket: RawSocket, afd: Arc<Afd>) -> io::Result<SockState> {
            Ok(SockState {
                iosb: IoStatusBlock::zeroed(),
                poll_info: AfdPollInfo::zeroed(),
                afd,
                base_socket: get_base_socket(raw_socket)?,
                user_evts: 0,
                pending_evts: 0,
                user_data: 0,
                poll_status: SockPollStatus::Idle,
                delete_pending: false,
                error: None,
                _pinned: PhantomPinned,
            })
        }

        /// True if need to be added on update queue, false otherwise.
        fn set_event(&mut self, ev: Event) -> bool {
            /* afd::POLL_CONNECT_FAIL and afd::POLL_ABORT are always reported, even when not requested by the caller. */
            let events = ev.flags | afd::POLL_CONNECT_FAIL | afd::POLL_ABORT;

            self.user_evts = events;
            self.user_data = ev.data;

            (events & !self.pending_evts) != 0
        }
    }
}
impl Drop for SockState {
    fn drop(&mut self) {
        // Ensure any in-flight AFD poll is cancelled before the state goes
        // away.
        self.mark_delete();
    }
}

/// Converts the pointer to a `SockState` into a raw pointer.
/// To revert see `from_overlapped`.
///
/// This leaks one strong `Arc` reference into the raw pointer; the kernel
/// conceptually owns that reference while an overlapped operation is in
/// flight.
fn into_overlapped(sock_state: Pin<Arc<Mutex<SockState>>>) -> *mut c_void {
    let overlapped_ptr: *const Mutex<SockState> =
        unsafe { Arc::into_raw(Pin::into_inner_unchecked(sock_state)) };
    overlapped_ptr as *mut _
}

/// Convert a raw overlapped pointer into a reference to `SockState`.
/// Reverts `into_overlapped`.
fn from_overlapped(ptr: *mut OVERLAPPED) -> Pin<Arc<Mutex<SockState>>> {
    let sock_ptr: *const Mutex<SockState> = ptr as *const _;
    // SAFETY: `ptr` must originate from `into_overlapped`, so it carries the
    // strong count leaked there and the value was never unpinned.
    unsafe { Pin::new_unchecked(Arc::from_raw(sock_ptr)) }
}

/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
#[cfg(debug_assertions)]
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
/// Windows implementation of `sys::Selector`
///
/// Edge-triggered event notification is simulated by resetting internal event flag of each socket state `SockState`
/// and setting all events back by intercepting all requests that could cause `io::ErrorKind::WouldBlock` happening.
///
/// This selector currently only supports sockets, because the `Afd` driver is winsock2 specific.
#[derive(Debug)]
pub struct Selector {
    #[cfg(debug_assertions)]
    id: usize,
    pub(super) inner: Arc<SelectorInner>,
}

impl Selector {
    pub fn new() -> io::Result<Selector> {
        SelectorInner::new().map(|inner| {
            // `NEXT_ID` starts at 0; add 1 so the first selector gets ID 1.
            #[cfg(debug_assertions)]
            let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
            Selector {
                #[cfg(debug_assertions)]
                id,
                inner: Arc::new(inner),
            }
        })
    }

    /// Returns a second handle sharing the same `SelectorInner` (and ID).
    pub fn try_clone(&self) -> io::Result<Selector> {
        Ok(Selector {
            #[cfg(debug_assertions)]
            id: self.id,
            inner: Arc::clone(&self.inner),
        })
    }

    /// # Safety
    ///
    /// This requires a mutable reference to self because only a single thread
    /// can poll IOCP at a time.
    pub fn select(&mut self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
        self.inner.select(events, timeout)
    }

    /// Clones the underlying completion port handle.
    pub(super) fn clone_port(&self) -> Arc<CompletionPort> {
        self.inner.cp.clone()
    }

    /// True when `other` refers to this selector's completion port.
    #[cfg(feature = "os-ext")]
    pub(super) fn same_port(&self, other: &Arc<CompletionPort>) -> bool {
        Arc::ptr_eq(&self.inner.cp, other)
    }
}
cfg_io_source! {
    use super::InternalState;
    use crate::Token;

    impl Selector {
        /// Registers `socket`, returning the state the I/O source must keep.
        pub(super) fn register(
            &self,
            socket: RawSocket,
            token: Token,
            interests: Interest,
        ) -> io::Result<InternalState> {
            SelectorInner::register(&self.inner, socket, token, interests)
        }

        /// Updates the token/interests of an existing registration.
        pub(super) fn reregister(
            &self,
            state: Pin<Arc<Mutex<SockState>>>,
            token: Token,
            interests: Interest,
        ) -> io::Result<()> {
            self.inner.reregister(state, token, interests)
        }

        #[cfg(debug_assertions)]
        pub fn id(&self) -> usize {
            self.id
        }
    }
}
#[derive(Debug)]
pub struct SelectorInner {
    // Completion port all AFD handles (and custom events) are associated with.
    pub(super) cp: Arc<CompletionPort>,
    // Sockets whose AFD poll must be (re)submitted before the next poll.
    update_queue: Mutex<VecDeque<Pin<Arc<Mutex<SockState>>>>>,
    // Pool of AFD helper handles shared between sockets.
    afd_group: AfdGroup,
    // Set while a thread is blocked polling the completion port.
    is_polling: AtomicBool,
}

// We have ensured thread safety by introducing lock manually.
unsafe impl Sync for SelectorInner {}
impl SelectorInner {
    pub fn new() -> io::Result<SelectorInner> {
        // Concurrency value 0 lets the kernel choose the thread count.
        CompletionPort::new(0).map(|cp| {
            let cp = Arc::new(cp);
            let cp_afd = Arc::clone(&cp);

            SelectorInner {
                cp,
                update_queue: Mutex::new(VecDeque::new()),
                afd_group: AfdGroup::new(cp_afd),
                is_polling: AtomicBool::new(false),
            }
        })
    }

    /// # Safety
    ///
    /// May only be called via `Selector::select`.
    pub fn select(&self, events: &mut Events, timeout: Option<Duration>) -> io::Result<()> {
        events.clear();

        if timeout.is_none() {
            loop {
                // No timeout: keep polling until at least one event (or an
                // error) is produced; empty wakeups are retried.
                let len = self.select2(&mut events.statuses, &mut events.events, None)?;
                if len == 0 {
                    continue;
                }
                break Ok(());
            }
        } else {
            self.select2(&mut events.statuses, &mut events.events, timeout)?;
            Ok(())
        }
    }

    pub fn select2(
        &self,
        statuses: &mut [CompletionStatus],
        events: &mut Vec<Event>,
        timeout: Option<Duration>,
    ) -> io::Result<usize> {
        // Only a single thread may poll the completion port at a time.
        assert!(!self.is_polling.swap(true, Ordering::AcqRel));

        unsafe { self.update_sockets_events() }?;

        let result = self.cp.get_many(statuses, timeout);

        self.is_polling.store(false, Ordering::Relaxed);

        match result {
            Ok(iocp_events) => Ok(unsafe { self.feed_events(events, iocp_events) }),
            // A wait timeout is not an error; it simply produced no events.
            Err(ref e) if e.raw_os_error() == Some(WAIT_TIMEOUT as i32) => Ok(0),
            Err(e) => Err(e),
        }
    }

    /// Submits (or re-submits) the AFD poll operation for every queued socket.
    unsafe fn update_sockets_events(&self) -> io::Result<()> {
        let mut update_queue = self.update_queue.lock().unwrap();
        for sock in update_queue.iter_mut() {
            let mut sock_internal = sock.lock().unwrap();
            if !sock_internal.is_pending_deletion() {
                sock_internal.update(sock)?;
            }
        }

        // Sockets without an error now have an AFD operation pending and can
        // leave the queue; only sockets whose update errored are retained so
        // they can be retried.
        update_queue.retain(|sock| sock.lock().unwrap().has_error());

        self.afd_group.release_unused_afd();
        Ok(())
    }

    // It returns processed count of iocp_events rather than the events itself.
    unsafe fn feed_events(
        &self,
        events: &mut Vec<Event>,
        iocp_events: &[CompletionStatus],
    ) -> usize {
        let mut n = 0;
        let mut update_queue = self.update_queue.lock().unwrap();
        for iocp_event in iocp_events.iter() {
            if iocp_event.overlapped().is_null() {
                // A null overlapped pointer marks a custom (posted) event.
                events.push(Event::from_completion_status(iocp_event));
                n += 1;
                continue;
            } else if iocp_event.token() % 2 == 1 {
                // Handle is a named pipe. This could be extended to be any non-AFD event.
                let callback = (*(iocp_event.overlapped() as *mut super::Overlapped)).callback;

                let len = events.len();
                callback(iocp_event.entry(), Some(events));
                n += events.len() - len;
                continue;
            }

            // Otherwise the overlapped pointer carries the `Arc` leaked by
            // `into_overlapped`; reclaim it here.
            let sock_state = from_overlapped(iocp_event.overlapped());
            let mut sock_guard = sock_state.lock().unwrap();
            if let Some(e) = sock_guard.feed_event() {
                events.push(e);
                n += 1;
            }

            // Edge-trigger simulation: surviving sockets are re-queued so
            // their poll gets re-armed on the next selection.
            if !sock_guard.is_pending_deletion() {
                update_queue.push_back(sock_state.clone());
            }
        }
        self.afd_group.release_unused_afd();
        n
    }
}
cfg_io_source! {
    use std::mem::size_of;
    use std::ptr::null_mut;
    use windows_sys::Win32::Networking::WinSock::{
        WSAGetLastError, WSAIoctl, SIO_BASE_HANDLE, SIO_BSP_HANDLE,
        SIO_BSP_HANDLE_POLL, SIO_BSP_HANDLE_SELECT, SOCKET_ERROR,
    };

    impl SelectorInner {
        /// Allocates a `SockState` for `socket`, records the requested events
        /// and queues it so the AFD poll gets submitted.
        fn register(
            this: &Arc<Self>,
            socket: RawSocket,
            token: Token,
            interests: Interest,
        ) -> io::Result<InternalState> {
            let flags = interests_to_afd_flags(interests);

            let sock = {
                let sock = this._alloc_sock_for_rawsocket(socket)?;
                let event = Event {
                    flags,
                    data: token.0 as u64,
                };
                sock.lock().unwrap().set_event(event);
                sock
            };

            let state = InternalState {
                selector: this.clone(),
                token,
                interests,
                sock_state: sock.clone(),
            };

            this.queue_state(sock);
            unsafe { this.update_sockets_events_if_polling()? };

            Ok(state)
        }

        // Directly accessed in `IoSourceState::do_io`.
        pub(super) fn reregister(
            &self,
            state: Pin<Arc<Mutex<SockState>>>,
            token: Token,
            interests: Interest,
        ) -> io::Result<()> {
            {
                let event = Event {
                    flags: interests_to_afd_flags(interests),
                    data: token.0 as u64,
                };

                state.lock().unwrap().set_event(event);
            }

            // FIXME: a sock which has_error true should not be re-added to
            // the update queue because it's already there.
            self.queue_state(state);
            unsafe { self.update_sockets_events_if_polling() }
        }

        /// This function is called by register() and reregister() to start an
        /// IOCTL_AFD_POLL operation corresponding to the registered events, but
        /// only if necessary.
        ///
        /// Since it is not possible to modify or synchronously cancel an AFD_POLL
        /// operation, and there can be only one active AFD_POLL operation per
        /// (socket, completion port) pair at any time, it is expensive to change
        /// a socket's event registration after it has been submitted to the kernel.
        ///
        /// Therefore, if no other threads are polling when interest in a socket
        /// event is (re)registered, the socket is added to the 'update queue', but
        /// the actual syscall to start the IOCTL_AFD_POLL operation is deferred
        /// until just before the GetQueuedCompletionStatusEx() syscall is made.
        ///
        /// However, when another thread is already blocked on
        /// GetQueuedCompletionStatusEx() we tell the kernel about the registered
        /// socket event(s) immediately.
        unsafe fn update_sockets_events_if_polling(&self) -> io::Result<()> {
            if self.is_polling.load(Ordering::Acquire) {
                self.update_sockets_events()
            } else {
                Ok(())
            }
        }

        /// Appends `sock_state` to the deferred-update queue.
        fn queue_state(&self, sock_state: Pin<Arc<Mutex<SockState>>>) {
            let mut update_queue = self.update_queue.lock().unwrap();
            update_queue.push_back(sock_state);
        }

        /// Builds a pinned `SockState` backed by a pooled AFD handle.
        fn _alloc_sock_for_rawsocket(
            &self,
            raw_socket: RawSocket,
        ) -> io::Result<Pin<Arc<Mutex<SockState>>>> {
            let afd = self.afd_group.acquire()?;
            Ok(Arc::pin(Mutex::new(SockState::new(raw_socket, afd)?)))
        }
    }

    /// Asks winsock (via `WSAIoctl`) for the handle `ioctl` resolves
    /// `raw_socket` to; returns the raw `WSAGetLastError` code on failure.
    fn try_get_base_socket(raw_socket: RawSocket, ioctl: u32) -> Result<RawSocket, i32> {
        let mut base_socket: RawSocket = 0;
        let mut bytes: u32 = 0;
        unsafe {
            if WSAIoctl(
                raw_socket as usize,
                ioctl,
                null_mut(),
                0,
                &mut base_socket as *mut _ as *mut c_void,
                size_of::<RawSocket>() as u32,
                &mut bytes,
                null_mut(),
                None,
            ) != SOCKET_ERROR
            {
                Ok(base_socket)
            } else {
                Err(WSAGetLastError())
            }
        }
    }

    /// Resolves the LSP-free base socket for `raw_socket`, trying fallback
    /// IOCTLs for misbehaving layered service providers.
    fn get_base_socket(raw_socket: RawSocket) -> io::Result<RawSocket> {
        let res = try_get_base_socket(raw_socket, SIO_BASE_HANDLE);
        if let Ok(base_socket) = res {
            return Ok(base_socket);
        }

        // The `SIO_BASE_HANDLE` should not be intercepted by LSPs, therefore
        // it should not fail as long as `raw_socket` is a valid socket. See
        // https://docs.microsoft.com/en-us/windows/win32/winsock/winsock-ioctls.
        // However, at least one known LSP deliberately breaks it, so we try
        // some alternative IOCTLs, starting with the most appropriate one.
        for &ioctl in &[
            SIO_BSP_HANDLE_SELECT,
            SIO_BSP_HANDLE_POLL,
            SIO_BSP_HANDLE,
        ] {
            if let Ok(base_socket) = try_get_base_socket(raw_socket, ioctl) {
                // Since we know now that we're dealing with an LSP (otherwise
                // SIO_BASE_HANDLE wouldn't have failed), only return any result
                // when it is different from the original `raw_socket`.
                if base_socket != raw_socket {
                    return Ok(base_socket);
                }
            }
        }

        // If the alternative IOCTLs also failed, return the original error.
        let os_error = res.unwrap_err();
        let err = io::Error::from_raw_os_error(os_error);
        Err(err)
    }
}
impl Drop for SelectorInner {
    // Drain every completion status still queued on the I/O completion port so
    // the `Arc` references smuggled through the OVERLAPPED pointers are
    // released before the port itself is torn down.
    fn drop(&mut self) {
        loop {
            let events_num: usize;
            let mut statuses: [CompletionStatus; 1024] = [CompletionStatus::zero(); 1024];
            // Zero timeout: only collect statuses that are already queued,
            // never block inside drop.
            let result = self
                .cp
                .get_many(&mut statuses, Some(std::time::Duration::from_millis(0)));
            match result {
                Ok(iocp_events) => {
                    events_num = iocp_events.iter().len();
                    for iocp_event in iocp_events.iter() {
                        if iocp_event.overlapped().is_null() {
                            // Custom event (e.g. a waker post) — carries no
                            // heap payload, nothing to free.
                        } else if iocp_event.token() % 2 == 1 {
                            // Named pipe (odd tokens mark pipe events here),
                            // dispatch the event so it can release resources.
                            let callback = unsafe {
                                (*(iocp_event.overlapped() as *mut super::Overlapped)).callback
                            };
                            callback(iocp_event.entry(), None);
                        } else {
                            // Drain sock state: reconstructing the Arc from the
                            // OVERLAPPED pointer drops the reference it held.
                            let _sock_state = from_overlapped(iocp_event.overlapped());
                        }
                    }
                }
                Err(_) => {
                    // Port unusable; nothing more can be drained.
                    break;
                }
            }
            if events_num == 0 {
                // An empty batch means the port has been fully drained; until
                // then keep looping (a full 1024-entry batch may be truncated).
                break;
            }
        }
        self.afd_group.release_unused_afd();
    }
}
cfg_net! {
    // Translate mio `Interest` bits into the AFD poll-event mask.
    // Error flags are included for either direction so error conditions are
    // always reported alongside readiness.
    fn interests_to_afd_flags(interests: Interest) -> u32 {
        let readable = if interests.is_readable() {
            READABLE_FLAGS | READ_CLOSED_FLAGS | ERROR_FLAGS
        } else {
            0
        };
        let writable = if interests.is_writable() {
            WRITABLE_FLAGS | WRITE_CLOSED_FLAGS | ERROR_FLAGS
        } else {
            0
        };
        readable | writable
    }
}

View file

@ -0,0 +1,66 @@
use std::io;
use std::net::{self, SocketAddr};
use std::os::windows::io::AsRawSocket;
use windows_sys::Win32::Networking::WinSock::{self, SOCKET, SOCKET_ERROR, SOCK_STREAM};
use crate::sys::windows::net::{new_ip_socket, socket_addr};
/// Create a raw TCP (`SOCK_STREAM`) socket whose address family matches
/// `address`. The socket is neither bound nor connected yet.
pub(crate) fn new_for_addr(address: SocketAddr) -> io::Result<SOCKET> {
    new_ip_socket(address, SOCK_STREAM)
}
/// Bind `socket` to the local address `addr` via the raw Winsock `bind` call.
pub(crate) fn bind(socket: &net::TcpListener, addr: SocketAddr) -> io::Result<()> {
    use WinSock::bind;

    // Convert the std address into its C representation plus length.
    let (sockaddr, sockaddr_len) = socket_addr(&addr);
    syscall!(
        bind(
            socket.as_raw_socket() as _,
            sockaddr.as_ptr(),
            sockaddr_len
        ),
        PartialEq::eq,
        SOCKET_ERROR
    )
    .map(|_| ())
}
pub(crate) fn connect(socket: &net::TcpStream, addr: SocketAddr) -> io::Result<()> {
use WinSock::connect;
let (raw_addr, raw_addr_length) = socket_addr(&addr);
let res = syscall!(
connect(
socket.as_raw_socket() as _,
raw_addr.as_ptr(),
raw_addr_length
),
PartialEq::eq,
SOCKET_ERROR
);
match res {
Err(err) if err.kind() != io::ErrorKind::WouldBlock => Err(err),
_ => Ok(()),
}
}
/// Put `socket` into listening mode with the requested `backlog`.
pub(crate) fn listen(socket: &net::TcpListener, backlog: u32) -> io::Result<()> {
    use std::convert::TryInto;
    use WinSock::listen;

    // Winsock takes an `i32` backlog; clamp values that do not fit.
    let backlog: i32 = backlog.try_into().unwrap_or(i32::max_value());
    syscall!(
        listen(socket.as_raw_socket() as _, backlog),
        PartialEq::eq,
        SOCKET_ERROR
    )
    .map(|_| ())
}
/// Accept a new incoming connection, delegating to the std implementation.
pub(crate) fn accept(listener: &net::TcpListener) -> io::Result<(net::TcpStream, SocketAddr)> {
    // The non-blocking state of `listener` is inherited. See
    // https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-accept#remarks.
    listener.accept()
}

View file

@ -0,0 +1,46 @@
use std::io;
use std::mem::{self, MaybeUninit};
use std::net::{self, SocketAddr};
use std::os::windows::io::{AsRawSocket, FromRawSocket};
use std::os::windows::raw::SOCKET as StdSocket; // windows-sys uses usize, stdlib uses u32/u64.
use crate::sys::windows::net::{new_ip_socket, socket_addr};
use windows_sys::Win32::Networking::WinSock::{
bind as win_bind, getsockopt, IPPROTO_IPV6, IPV6_V6ONLY, SOCKET_ERROR, SOCK_DGRAM,
};
/// Create a UDP (`SOCK_DGRAM`) socket and bind it to `addr`.
pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
    let raw_socket = new_ip_socket(addr, SOCK_DGRAM)?;
    // Wrap the raw handle immediately so it is closed by `Drop` should the
    // `bind` below fail.
    let socket = unsafe { net::UdpSocket::from_raw_socket(raw_socket as StdSocket) };

    let (sockaddr, sockaddr_len) = socket_addr(&addr);
    syscall!(
        win_bind(raw_socket, sockaddr.as_ptr(), sockaddr_len),
        PartialEq::eq,
        SOCKET_ERROR
    )
    .map(|_| socket)
}
/// Query whether the `IPV6_V6ONLY` option is enabled on `socket`,
/// i.e. whether it refuses IPv4-mapped addresses.
pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
    // Winsock writes the option value here; it starts uninitialised.
    let mut optval: MaybeUninit<i32> = MaybeUninit::uninit();
    let mut optlen = mem::size_of::<i32>() as i32;
    syscall!(
        getsockopt(
            socket.as_raw_socket() as usize,
            IPPROTO_IPV6 as i32,
            IPV6_V6ONLY as i32,
            optval.as_mut_ptr().cast(),
            &mut optlen,
        ),
        PartialEq::eq,
        SOCKET_ERROR
    )?;
    debug_assert_eq!(optlen as usize, mem::size_of::<i32>());
    // Safety: `getsockopt` initialised `optval` for us.
    let optval = unsafe { optval.assume_init() };
    // Any non-zero value means the option is on.
    Ok(optval != 0)
}

View file

@ -0,0 +1,29 @@
use crate::sys::windows::Event;
use crate::sys::windows::Selector;
use crate::Token;
use super::iocp::CompletionPort;
use std::io;
use std::sync::Arc;
// Windows waker: wakes a `Poll` by posting a hand-crafted completion status
// directly to the selector's I/O completion port.
#[derive(Debug)]
pub struct Waker {
    token: Token, // Token delivered with the synthetic readable event.
    port: Arc<CompletionPort>, // Completion port shared with the `Selector`.
}
impl Waker {
    /// Create a waker bound to `selector`'s completion port, reporting
    /// `token` on every wake-up.
    pub fn new(selector: &Selector, token: Token) -> io::Result<Waker> {
        let port = selector.clone_port();
        Ok(Waker { token, port })
    }

    /// Wake the associated `Poll` by posting a synthetic readable event.
    pub fn wake(&self) -> io::Result<()> {
        let mut event = Event::new(self.token);
        event.set_readable();
        self.port.post(event.to_completion_status())
    }
}

138
third-party/vendor/mio/src/token.rs vendored Normal file
View file

@ -0,0 +1,138 @@
/// Associates readiness events with [`event::Source`]s.
///
/// `Token` is a wrapper around `usize` and is used as an argument to
/// [`Registry::register`] and [`Registry::reregister`].
///
/// See [`Poll`] for more documentation on polling.
///
/// [`event::Source`]: ./event/trait.Source.html
/// [`Poll`]: struct.Poll.html
/// [`Registry::register`]: struct.Registry.html#method.register
/// [`Registry::reregister`]: struct.Registry.html#method.reregister
///
/// # Example
///
/// Using `Token` to track which socket generated the event. In this example,
/// `HashMap` is used, but usually something like [`slab`] is better.
///
/// [`slab`]: https://crates.io/crates/slab
///
#[cfg_attr(all(feature = "os-poll", feature = "net"), doc = "```")]
#[cfg_attr(not(all(feature = "os-poll", feature = "net")), doc = "```ignore")]
/// # use std::error::Error;
/// # fn main() -> Result<(), Box<dyn Error>> {
/// use mio::{Events, Interest, Poll, Token};
/// use mio::net::TcpListener;
///
/// use std::thread;
/// use std::io::{self, Read};
/// use std::collections::HashMap;
///
/// // After this number of sockets is accepted, the server will shutdown.
/// const MAX_SOCKETS: usize = 32;
///
/// // Pick a token that will not be used by any other socket and use that one
/// // for the listener.
/// const LISTENER: Token = Token(1024);
///
/// // Used to store the sockets.
/// let mut sockets = HashMap::new();
///
/// // This is used to generate a unique token for a socket
/// let mut next_socket_index = 0;
///
/// // The `Poll` instance
/// let mut poll = Poll::new()?;
///
/// // Tcp listener
/// let mut listener = TcpListener::bind("127.0.0.1:0".parse()?)?;
///
/// // Register the listener
/// poll.registry().register(&mut listener, LISTENER, Interest::READABLE)?;
///
/// // Spawn a thread that will connect a bunch of sockets then close them
/// let addr = listener.local_addr()?;
/// thread::spawn(move || {
/// use std::net::TcpStream;
///
/// // +1 here is to connect an extra socket to signal the socket to close
/// for _ in 0..(MAX_SOCKETS+1) {
/// // Connect then drop the socket
/// let _ = TcpStream::connect(addr).unwrap();
/// }
/// });
///
/// // Event storage
/// let mut events = Events::with_capacity(1024);
///
/// // Read buffer, this will never actually get filled
/// let mut buf = [0; 256];
///
/// // The main event loop
/// loop {
/// // Wait for events
/// poll.poll(&mut events, None)?;
///
/// for event in &events {
/// match event.token() {
/// LISTENER => {
/// // Perform operations in a loop until `WouldBlock` is
/// // encountered.
/// loop {
/// match listener.accept() {
/// Ok((mut socket, _)) => {
/// // Shutdown the server
/// if next_socket_index == MAX_SOCKETS {
/// return Ok(());
/// }
///
/// // Get the token for the socket
/// let token = Token(next_socket_index);
/// next_socket_index += 1;
///
/// // Register the new socket w/ poll
/// poll.registry().register(&mut socket, token, Interest::READABLE)?;
///
/// // Store the socket
/// sockets.insert(token, socket);
/// }
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
/// // Socket is not ready anymore, stop accepting
/// break;
/// }
/// e => panic!("err={:?}", e), // Unexpected error
/// }
/// }
/// }
/// token => {
/// // Always operate in a loop
/// loop {
/// match sockets.get_mut(&token).unwrap().read(&mut buf) {
/// Ok(0) => {
/// // Socket is closed, remove it from the map
/// sockets.remove(&token);
/// break;
/// }
/// // Data is not actually sent in this example
/// Ok(_) => unreachable!(),
/// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
/// // Socket is not ready anymore, stop reading
/// break;
/// }
/// e => panic!("err={:?}", e), // Unexpected error
/// }
/// }
/// }
/// }
/// }
/// }
/// # }
/// ```
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
// Plain newtype over `usize`; the inner value is public so callers can
// construct and pattern-match tokens directly.
pub struct Token(pub usize);
impl From<Token> for usize {
fn from(val: Token) -> usize {
val.0
}
}

96
third-party/vendor/mio/src/waker.rs vendored Normal file
View file

@ -0,0 +1,96 @@
use crate::{sys, Registry, Token};
use std::io;
/// Waker allows cross-thread waking of [`Poll`].
///
/// When created it will cause events with [`readable`] readiness and the
/// provided `token` if [`wake`] is called, possibly from another thread.
///
/// [`Poll`]: struct.Poll.html
/// [`readable`]: ./event/struct.Event.html#method.is_readable
/// [`wake`]: struct.Waker.html#method.wake
///
/// # Notes
///
/// `Waker` events are only guaranteed to be delivered while the `Waker` value
/// is alive.
///
/// Only a single `Waker` can be active per [`Poll`], if multiple threads need
/// access to the `Waker` it can be shared via for example an `Arc`. What
/// happens if multiple `Waker`s are registered with the same `Poll` is
/// unspecified.
///
/// # Implementation notes
///
/// On platforms that support kqueue this will use the `EVFILT_USER` event
/// filter, see [implementation notes of `Poll`] to see what platforms support
/// kqueue. On Linux it uses [eventfd].
///
/// [implementation notes of `Poll`]: struct.Poll.html#implementation-notes
/// [eventfd]: https://man7.org/linux/man-pages/man2/eventfd.2.html
///
/// # Examples
///
/// Wake a [`Poll`] instance from another thread.
///
#[cfg_attr(feature = "os-poll", doc = "```")]
#[cfg_attr(not(feature = "os-poll"), doc = "```ignore")]
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::thread;
/// use std::time::Duration;
/// use std::sync::Arc;
///
/// use mio::{Events, Token, Poll, Waker};
///
/// const WAKE_TOKEN: Token = Token(10);
///
/// let mut poll = Poll::new()?;
/// let mut events = Events::with_capacity(2);
///
/// let waker = Arc::new(Waker::new(poll.registry(), WAKE_TOKEN)?);
///
/// // We need to keep the Waker alive, so we'll create a clone for the
/// // thread we create below.
/// let waker1 = waker.clone();
/// let handle = thread::spawn(move || {
/// // Working hard, or hardly working?
/// thread::sleep(Duration::from_millis(500));
///
/// // Now we'll wake the queue on the other thread.
/// waker1.wake().expect("unable to wake");
/// });
///
/// // On our current thread we'll poll for events, without a timeout.
/// poll.poll(&mut events, None)?;
///
/// // After about 500 milliseconds we should be awoken by the other thread and
/// // get a single event.
/// assert!(!events.is_empty());
/// let waker_event = events.iter().next().unwrap();
/// assert!(waker_event.is_readable());
/// assert_eq!(waker_event.token(), WAKE_TOKEN);
/// # handle.join().unwrap();
/// # Ok(())
/// # }
/// ```
#[derive(Debug)]
pub struct Waker {
    inner: sys::Waker, // Platform-specific waker (eventfd, kqueue user event, IOCP post, ...).
}
impl Waker {
    /// Create a new `Waker` tied to the `Poll` behind `registry`, delivering
    /// events with `token` when woken.
    pub fn new(registry: &Registry, token: Token) -> io::Result<Waker> {
        // Debug-only bookkeeping; presumably enforces the "single Waker per
        // Poll" rule documented above — confirm in `Registry::register_waker`.
        #[cfg(debug_assertions)]
        registry.register_waker();

        let inner = sys::Waker::new(registry.selector(), token)?;
        Ok(Waker { inner })
    }

    /// Wake up the [`Poll`] associated with this `Waker`.
    ///
    /// [`Poll`]: struct.Poll.html
    pub fn wake(&self) -> io::Result<()> {
        self.inner.wake()
    }
}