Vendor things

This commit is contained in:
John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions

273
third-party/vendor/memmap2/src/advice.rs vendored Normal file
View file

@@ -0,0 +1,273 @@
// The use statement is needed for the `cargo docs`
#[allow(unused_imports)]
use crate::{Mmap, MmapMut};
/// Values supported by [Mmap::advise] and [MmapMut::advise] functions.
/// See [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
///
/// Each variant's discriminant is the corresponding `libc::MADV_*` constant,
/// so a value can be passed straight through to `madvise(2)` as an `i32`.
/// Platform-specific advices are gated with `cfg` attributes on the target OS.
#[repr(i32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Advice {
    /// **MADV_NORMAL**
    ///
    /// No special treatment. This is the default.
    Normal = libc::MADV_NORMAL,
    /// **MADV_RANDOM**
    ///
    /// Expect page references in random order. (Hence, read
    /// ahead may be less useful than normally.)
    Random = libc::MADV_RANDOM,
    /// **MADV_SEQUENTIAL**
    ///
    /// Expect page references in sequential order. (Hence, pages
    /// in the given range can be aggressively read ahead, and may
    /// be freed soon after they are accessed.)
    Sequential = libc::MADV_SEQUENTIAL,
    /// **MADV_WILLNEED**
    ///
    /// Expect access in the near future. (Hence, it might be a
    /// good idea to read some pages ahead.)
    WillNeed = libc::MADV_WILLNEED,
    /// **MADV_DONTNEED**
    ///
    /// Do not expect access in the near future. (For the time
    /// being, the application is finished with the given range,
    /// so the kernel can free resources associated with it.)
    ///
    /// After a successful MADV_DONTNEED operation, the semantics
    /// of memory access in the specified region are changed:
    /// subsequent accesses of pages in the range will succeed,
    /// but will result in either repopulating the memory contents
    /// from the up-to-date contents of the underlying mapped file
    /// (for shared file mappings, shared anonymous mappings, and
    /// shmem-based techniques such as System V shared memory
    /// segments) or zero-fill-on-demand pages for anonymous
    /// private mappings.
    ///
    /// Note that, when applied to shared mappings, MADV_DONTNEED
    /// might not lead to immediate freeing of the pages in the
    /// range.  The kernel is free to delay freeing the pages
    /// until an appropriate moment.  The resident set size (RSS)
    /// of the calling process will be immediately reduced
    /// however.
    ///
    /// **MADV_DONTNEED** cannot be applied to locked pages, Huge TLB
    /// pages, or VM_PFNMAP pages.  (Pages marked with the kernel-
    /// internal VM_PFNMAP flag are special memory areas that are
    /// not managed by the virtual memory subsystem.  Such pages
    /// are typically created by device drivers that map the pages
    /// into user space.)
    DontNeed = libc::MADV_DONTNEED,
    //
    // The rest are Linux-specific
    //
    /// **MADV_FREE** - Linux (since Linux 4.5) and Darwin
    ///
    /// The application no longer requires the pages in the range
    /// specified by addr and len.  The kernel can thus free these
    /// pages, but the freeing could be delayed until memory
    /// pressure occurs.  For each of the pages that has been
    /// marked to be freed but has not yet been freed, the free
    /// operation will be canceled if the caller writes into the
    /// page.  After a successful MADV_FREE operation, any stale
    /// data (i.e., dirty, unwritten pages) will be lost when the
    /// kernel frees the pages.  However, subsequent writes to
    /// pages in the range will succeed and then kernel cannot
    /// free those dirtied pages, so that the caller can always
    /// see just written data.  If there is no subsequent write,
    /// the kernel can free the pages at any time.  Once pages in
    /// the range have been freed, the caller will see zero-fill-
    /// on-demand pages upon subsequent page references.
    ///
    /// The MADV_FREE operation can be applied only to private
    /// anonymous pages (see mmap(2)).  In Linux before version
    /// 4.12, when freeing pages on a swapless system, the pages
    /// in the given range are freed instantly, regardless of
    /// memory pressure.
    #[cfg(any(target_os = "linux", target_os = "macos", target_os = "ios"))]
    Free = libc::MADV_FREE,
    /// **MADV_REMOVE** - Linux only (since Linux 2.6.16)
    ///
    /// Free up a given range of pages and its associated backing
    /// store.  This is equivalent to punching a hole in the
    /// corresponding byte range of the backing store (see
    /// fallocate(2)).  Subsequent accesses in the specified
    /// address range will see bytes containing zero.
    ///
    /// The specified address range must be mapped shared and
    /// writable.  This flag cannot be applied to locked pages,
    /// Huge TLB pages, or VM_PFNMAP pages.
    ///
    /// In the initial implementation, only tmpfs(5) supported
    /// **MADV_REMOVE**; but since Linux 3.5, any filesystem which
    /// supports the fallocate(2) FALLOC_FL_PUNCH_HOLE mode also
    /// supports MADV_REMOVE.  Hugetlbfs fails with the error
    /// EINVAL and other filesystems fail with the error
    /// EOPNOTSUPP.
    #[cfg(target_os = "linux")]
    Remove = libc::MADV_REMOVE,
    /// **MADV_DONTFORK** - Linux only (since Linux 2.6.16)
    ///
    /// Do not make the pages in this range available to the child
    /// after a fork(2).  This is useful to prevent copy-on-write
    /// semantics from changing the physical location of a page if
    /// the parent writes to it after a fork(2).  (Such page
    /// relocations cause problems for hardware that DMAs into the
    /// page.)
    #[cfg(target_os = "linux")]
    DontFork = libc::MADV_DONTFORK,
    /// **MADV_DOFORK** - Linux only (since Linux 2.6.16)
    ///
    /// Undo the effect of MADV_DONTFORK, restoring the default
    /// behavior, whereby a mapping is inherited across fork(2).
    #[cfg(target_os = "linux")]
    DoFork = libc::MADV_DOFORK,
    /// **MADV_MERGEABLE** - Linux only (since Linux 2.6.32)
    ///
    /// Enable Kernel Samepage Merging (KSM) for the pages in the
    /// range specified by addr and length.  The kernel regularly
    /// scans those areas of user memory that have been marked as
    /// mergeable, looking for pages with identical content.
    /// These are replaced by a single write-protected page (which
    /// is automatically copied if a process later wants to update
    /// the content of the page).  KSM merges only private
    /// anonymous pages (see mmap(2)).
    ///
    /// The KSM feature is intended for applications that generate
    /// many instances of the same data (e.g., virtualization
    /// systems such as KVM).  It can consume a lot of processing
    /// power; use with care.  See the Linux kernel source file
    /// Documentation/admin-guide/mm/ksm.rst for more details.
    ///
    /// The MADV_MERGEABLE and MADV_UNMERGEABLE operations are
    /// available only if the kernel was configured with
    /// CONFIG_KSM.
    #[cfg(target_os = "linux")]
    Mergeable = libc::MADV_MERGEABLE,
    /// **MADV_UNMERGEABLE** - Linux only (since Linux 2.6.32)
    ///
    /// Undo the effect of an earlier MADV_MERGEABLE operation on
    /// the specified address range; KSM unmerges whatever pages
    /// it had merged in the address range specified by addr and
    /// length.
    #[cfg(target_os = "linux")]
    Unmergeable = libc::MADV_UNMERGEABLE,
    /// **MADV_HUGEPAGE** - Linux only (since Linux 2.6.38)
    ///
    /// Enable Transparent Huge Pages (THP) for pages in the range
    /// specified by addr and length.  Currently, Transparent Huge
    /// Pages work only with private anonymous pages (see
    /// mmap(2)).  The kernel will regularly scan the areas marked
    /// as huge page candidates to replace them with huge pages.
    /// The kernel will also allocate huge pages directly when the
    /// region is naturally aligned to the huge page size (see
    /// posix_memalign(2)).
    ///
    /// This feature is primarily aimed at applications that use
    /// large mappings of data and access large regions of that
    /// memory at a time (e.g., virtualization systems such as
    /// QEMU).  It can very easily waste memory (e.g., a 2 MB
    /// mapping that only ever accesses 1 byte will result in 2 MB
    /// of wired memory instead of one 4 KB page).  See the Linux
    /// kernel source file
    /// Documentation/admin-guide/mm/transhuge.rst for more
    /// details.
    ///
    /// Most common kernels configurations provide MADV_HUGEPAGE-
    /// style behavior by default, and thus MADV_HUGEPAGE is
    /// normally not necessary.  It is mostly intended for
    /// embedded systems, where MADV_HUGEPAGE-style behavior may
    /// not be enabled by default in the kernel.  On such systems,
    /// this flag can be used in order to selectively enable THP.
    /// Whenever MADV_HUGEPAGE is used, it should always be in
    /// regions of memory with an access pattern that the
    /// developer knows in advance won't risk to increase the
    /// memory footprint of the application when transparent
    /// hugepages are enabled.
    ///
    /// The MADV_HUGEPAGE and MADV_NOHUGEPAGE operations are
    /// available only if the kernel was configured with
    /// CONFIG_TRANSPARENT_HUGEPAGE.
    #[cfg(target_os = "linux")]
    HugePage = libc::MADV_HUGEPAGE,
    /// **MADV_NOHUGEPAGE** - Linux only (since Linux 2.6.38)
    ///
    /// Ensures that memory in the address range specified by addr
    /// and length will not be backed by transparent hugepages.
    #[cfg(target_os = "linux")]
    NoHugePage = libc::MADV_NOHUGEPAGE,
    /// **MADV_DONTDUMP** - Linux only (since Linux 3.4)
    ///
    /// Exclude from a core dump those pages in the range
    /// specified by addr and length.  This is useful in
    /// applications that have large areas of memory that are
    /// known not to be useful in a core dump.  The effect of
    /// **MADV_DONTDUMP** takes precedence over the bit mask that is
    /// set via the `/proc/[pid]/coredump_filter` file (see
    /// core(5)).
    #[cfg(target_os = "linux")]
    DontDump = libc::MADV_DONTDUMP,
    /// **MADV_DODUMP** - Linux only (since Linux 3.4)
    ///
    /// Undo the effect of an earlier MADV_DONTDUMP.
    #[cfg(target_os = "linux")]
    DoDump = libc::MADV_DODUMP,
    /// **MADV_HWPOISON** - Linux only (since Linux 2.6.32)
    ///
    /// Poison the pages in the range specified by addr and length
    /// and handle subsequent references to those pages like a
    /// hardware memory corruption.  This operation is available
    /// only for privileged (CAP_SYS_ADMIN) processes.  This
    /// operation may result in the calling process receiving a
    /// SIGBUS and the page being unmapped.
    ///
    /// This feature is intended for testing of memory error-
    /// handling code; it is available only if the kernel was
    /// configured with CONFIG_MEMORY_FAILURE.
    #[cfg(target_os = "linux")]
    HwPoison = libc::MADV_HWPOISON,
    /// **MADV_ZERO_WIRED_PAGES** - Darwin only
    ///
    /// Indicates that the application would like the wired pages in this address range to be
    /// zeroed out if the address range is deallocated without first unwiring the pages (i.e.
    /// a munmap(2) without a preceding munlock(2) or the application quits).  This is used
    /// with madvise() system call.
    #[cfg(any(target_os = "macos", target_os = "ios"))]
    ZeroWiredPages = libc::MADV_ZERO_WIRED_PAGES,
    /// **MADV_FREE_REUSABLE** - Darwin only
    ///
    /// Behaves like **MADV_FREE**, but the freed pages are accounted for in the RSS of the process.
    #[cfg(any(target_os = "macos", target_os = "ios"))]
    FreeReusable = libc::MADV_FREE_REUSABLE,
    /// **MADV_FREE_REUSE** - Darwin only
    ///
    /// Marks a memory region previously freed by **MADV_FREE_REUSABLE** as non-reusable, accounts
    /// for the pages in the RSS of the process.  Pages that have been freed will be replaced by
    /// zero-filled pages on demand, other pages will be left as is.
    #[cfg(any(target_os = "macos", target_os = "ios"))]
    FreeReuse = libc::MADV_FREE_REUSE,
}
// Future expansion:
// MADV_SOFT_OFFLINE (since Linux 2.6.33)
// MADV_WIPEONFORK (since Linux 4.14)
// MADV_KEEPONFORK (since Linux 4.14)
// MADV_COLD (since Linux 5.4)
// MADV_PAGEOUT (since Linux 5.4)

1754
third-party/vendor/memmap2/src/lib.rs vendored Normal file

File diff suppressed because it is too large Load diff

81
third-party/vendor/memmap2/src/stub.rs vendored Normal file
View file

@@ -0,0 +1,81 @@
use std::fs::File;
use std::io;
// A stable alternative to https://doc.rust-lang.org/stable/std/primitive.never.html
//
// An empty enum has no values, so `Never` can never be constructed; matching
// on it (`match self.never {}`) type-checks as producing any return type.
enum Never {}

/// Stand-in mmap "implementation" for unsupported platforms.  Because its only
/// field is uninhabited, a value of this type can never exist, which makes
/// every method taking `&self` statically unreachable.
pub struct MmapInner {
    // Uninhabited field: guarantees `MmapInner` itself cannot be constructed.
    never: Never,
}
impl MmapInner {
    /// Shared failure path for every `map*` constructor: memory mapping is
    /// not supported on this platform, so construction always errors.
    fn new() -> io::Result<MmapInner> {
        Err(io::Error::new(
            io::ErrorKind::Other,
            "platform not supported",
        ))
    }

    // Each of the `map*` constructors mirrors the signature of the real
    // platform backends (see unix.rs / windows.rs) but unconditionally fails.

    pub fn map(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
        MmapInner::new()
    }

    pub fn map_exec(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
        MmapInner::new()
    }

    pub fn map_mut(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
        MmapInner::new()
    }

    pub fn map_copy(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
        MmapInner::new()
    }

    pub fn map_copy_read_only(_: usize, _: &File, _: u64, _: bool) -> io::Result<MmapInner> {
        MmapInner::new()
    }

    pub fn map_anon(_: usize, _: bool, _: bool) -> io::Result<MmapInner> {
        MmapInner::new()
    }

    // All `&self` methods below are unreachable: `self.never` is uninhabited,
    // so the empty `match` satisfies the type checker without any code.

    pub fn flush(&self, _: usize, _: usize) -> io::Result<()> {
        match self.never {}
    }

    pub fn flush_async(&self, _: usize, _: usize) -> io::Result<()> {
        match self.never {}
    }

    pub fn make_read_only(&mut self) -> io::Result<()> {
        match self.never {}
    }

    pub fn make_exec(&mut self) -> io::Result<()> {
        match self.never {}
    }

    pub fn make_mut(&mut self) -> io::Result<()> {
        match self.never {}
    }

    #[inline]
    pub fn ptr(&self) -> *const u8 {
        match self.never {}
    }

    #[inline]
    pub fn mut_ptr(&mut self) -> *mut u8 {
        match self.never {}
    }

    #[inline]
    pub fn len(&self) -> usize {
        match self.never {}
    }
}
/// Returns the length of `file` in bytes, as reported by its metadata.
pub fn file_len(file: &File) -> io::Result<u64> {
    file.metadata().map(|meta| meta.len())
}

318
third-party/vendor/memmap2/src/unix.rs vendored Normal file
View file

@@ -0,0 +1,318 @@
extern crate libc;
use std::fs::File;
use std::mem::ManuallyDrop;
use std::os::unix::io::{FromRawFd, RawFd};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{io, ptr};
use crate::advice::Advice;
// `MAP_STACK` only exists (and only has meaning) on a subset of targets; on
// every other target we substitute 0 so the flag can be OR'd into the mmap
// flag word unconditionally without further `cfg` checks at the call site.
#[cfg(any(
    all(target_os = "linux", not(target_arch = "mips")),
    target_os = "freebsd",
    target_os = "android"
))]
const MAP_STACK: libc::c_int = libc::MAP_STACK;

#[cfg(not(any(
    all(target_os = "linux", not(target_arch = "mips")),
    target_os = "freebsd",
    target_os = "android"
)))]
const MAP_STACK: libc::c_int = 0;

// Same fallback-to-zero trick for the Linux/Android-only `MAP_POPULATE`
// (prefault pages at map time) flag.
#[cfg(any(target_os = "linux", target_os = "android"))]
const MAP_POPULATE: libc::c_int = libc::MAP_POPULATE;

#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_POPULATE: libc::c_int = 0;
/// Unix implementation of a memory map, wrapping a raw `mmap(2)` region.
pub struct MmapInner {
    // Pointer to the first byte of the *requested* range, i.e. already
    // advanced past any page-alignment padding added in `MmapInner::new`.
    ptr: *mut libc::c_void,
    // Requested length in bytes; the underlying kernel mapping may be larger
    // (alignment padding, or a 1-byte map standing in for an empty range).
    len: usize,
}
impl MmapInner {
    /// Creates a new `MmapInner`.
    ///
    /// This is a thin wrapper around the `mmap` system call.
    ///
    /// `offset` need not be page-aligned: it is rounded down to a page
    /// boundary for the kernel, the mapping is enlarged accordingly, and the
    /// stored pointer is advanced past the padding so callers see exactly
    /// `len` bytes starting at the byte `offset` refers to.
    fn new(
        len: usize,
        prot: libc::c_int,
        flags: libc::c_int,
        file: RawFd,
        offset: u64,
    ) -> io::Result<MmapInner> {
        // Distance of `offset` past the previous page boundary.
        let alignment = offset % page_size() as u64;
        let aligned_offset = offset - alignment;
        let aligned_len = len + alignment as usize;
        // `libc::mmap` does not support zero-size mappings. POSIX defines:
        //
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html
        // > If `len` is zero, `mmap()` shall fail and no mapping shall be established.
        //
        // So if we would create such a mapping, create a one-byte mapping instead:
        let aligned_len = aligned_len.max(1);
        // Note that in that case `MmapInner::len` is still set to zero,
        // and `Mmap` will still dereference to an empty slice.
        //
        // If this mapping is backed by an empty file, we create a mapping larger than the file.
        // This is unusual but well-defined. On the same man page, POSIX further defines:
        //
        // > The `mmap()` function can be used to map a region of memory that is larger
        // > than the current size of the object.
        //
        // (The object here is the file.)
        //
        // > Memory access within the mapping but beyond the current end of the underlying
        // > objects may result in SIGBUS signals being sent to the process. The reason for this
        // > is that the size of the object can be manipulated by other processes and can change
        // > at any moment. The implementation should tell the application that a memory reference
        // > is outside the object where this can be detected; otherwise, written data may be lost
        // > and read data may not reflect actual data in the object.
        //
        // Because `MmapInner::len` is not incremented, this increment of `aligned_len`
        // will not allow accesses past the end of the file and will not cause SIGBUS.
        //
        // (SIGBUS is still possible by mapping a non-empty file and then truncating it
        // to a shorter size, but that is unrelated to this handling of empty files.)
        unsafe {
            let ptr = libc::mmap(
                ptr::null_mut(),
                aligned_len as libc::size_t,
                prot,
                flags,
                file,
                aligned_offset as libc::off_t,
            );
            if ptr == libc::MAP_FAILED {
                Err(io::Error::last_os_error())
            } else {
                Ok(MmapInner {
                    // Skip the alignment padding so `ptr` points at the byte
                    // the caller's `offset` named.
                    ptr: ptr.offset(alignment as isize),
                    len,
                })
            }
        }
    }

    /// Read-only shared mapping (`PROT_READ`, `MAP_SHARED`).
    pub fn map(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ,
            libc::MAP_SHARED | populate,
            file,
            offset,
        )
    }

    /// Read + execute shared mapping (`PROT_READ | PROT_EXEC`, `MAP_SHARED`).
    pub fn map_exec(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ | libc::PROT_EXEC,
            libc::MAP_SHARED | populate,
            file,
            offset,
        )
    }

    /// Read-write shared mapping: writes propagate to the underlying file.
    pub fn map_mut(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_SHARED | populate,
            file,
            offset,
        )
    }

    /// Read-write *private* (copy-on-write) mapping: writes stay local to
    /// this mapping and are never written back to the file.
    pub fn map_copy(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | populate,
            file,
            offset,
        )
    }

    /// Read-only private mapping (private like `map_copy`, but never writable).
    pub fn map_copy_read_only(
        len: usize,
        file: RawFd,
        offset: u64,
        populate: bool,
    ) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ,
            libc::MAP_PRIVATE | populate,
            file,
            offset,
        )
    }

    /// Open an anonymous memory map.
    pub fn map_anon(len: usize, stack: bool, populate: bool) -> io::Result<MmapInner> {
        let stack = if stack { MAP_STACK } else { 0 };
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANON | stack | populate,
            // Anonymous maps take no file descriptor.
            -1,
            0,
        )
    }

    /// Synchronously flushes `len` bytes starting at `offset` to the backing
    /// file via `msync(MS_SYNC)`, blocking until the write-out completes.
    pub fn flush(&self, offset: usize, len: usize) -> io::Result<()> {
        // `msync` requires a page-aligned address: round `ptr + offset` down
        // to a page boundary and grow `len` to still cover the requested range.
        let alignment = (self.ptr as usize + offset) % page_size();
        let offset = offset as isize - alignment as isize;
        let len = len + alignment;
        let result =
            unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_SYNC) };
        if result == 0 {
            Ok(())
        } else {
            Err(io::Error::last_os_error())
        }
    }

    /// Like `flush`, but uses `MS_ASYNC`: schedules the write-out and returns
    /// without waiting for it to finish.
    pub fn flush_async(&self, offset: usize, len: usize) -> io::Result<()> {
        // Same page-alignment adjustment as in `flush`.
        let alignment = (self.ptr as usize + offset) % page_size();
        let offset = offset as isize - alignment as isize;
        let len = len + alignment;
        let result =
            unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_ASYNC) };
        if result == 0 {
            Ok(())
        } else {
            Err(io::Error::last_os_error())
        }
    }

    /// Changes the protection of the whole mapping via `mprotect(2)`.
    fn mprotect(&mut self, prot: libc::c_int) -> io::Result<()> {
        unsafe {
            // Rewind to the page-aligned start of the kernel mapping and cover
            // the padding too; `.max(1)` mirrors the 1-byte map used for
            // zero-length mappings in `new`.
            let alignment = self.ptr as usize % page_size();
            let ptr = self.ptr.offset(-(alignment as isize));
            let len = self.len + alignment;
            let len = len.max(1);
            if libc::mprotect(ptr, len, prot) == 0 {
                Ok(())
            } else {
                Err(io::Error::last_os_error())
            }
        }
    }

    /// Drops write/execute permission, leaving the map read-only.
    pub fn make_read_only(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ)
    }

    /// Makes the map readable and executable (but not writable).
    pub fn make_exec(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ | libc::PROT_EXEC)
    }

    /// Makes the map readable and writable (but not executable).
    pub fn make_mut(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ | libc::PROT_WRITE)
    }

    /// Raw const pointer to the first requested byte.
    #[inline]
    pub fn ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }

    /// Raw mutable pointer to the first requested byte.
    #[inline]
    pub fn mut_ptr(&mut self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Requested length of the mapping in bytes.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Applies `madvise(2)` advice to `len` bytes starting at `offset`.
    pub fn advise(&self, advice: Advice, offset: usize, len: usize) -> io::Result<()> {
        // `madvise` also wants a page-aligned address; same adjustment as `flush`.
        let alignment = (self.ptr as usize + offset) % page_size();
        let offset = offset as isize - alignment as isize;
        let len = len + alignment;
        unsafe {
            // `Advice` is `repr(i32)` with `MADV_*` discriminants, so the cast
            // yields the exact constant `madvise` expects.
            if libc::madvise(self.ptr.offset(offset), len, advice as i32) != 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(())
            }
        }
    }

    /// Locks the whole mapping into physical memory via `mlock(2)`.
    pub fn lock(&self) -> io::Result<()> {
        unsafe {
            if libc::mlock(self.ptr, self.len) != 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(())
            }
        }
    }

    /// Undoes `lock` via `munlock(2)`.
    pub fn unlock(&self) -> io::Result<()> {
        unsafe {
            if libc::munlock(self.ptr, self.len) != 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(())
            }
        }
    }
}
impl Drop for MmapInner {
    fn drop(&mut self) {
        // Reconstruct the page-aligned base pointer and full kernel-mapping
        // length, mirroring the adjustments made in `MmapInner::new`
        // (including the 1-byte map that stands in for a zero-length range).
        let alignment = self.ptr as usize % page_size();
        let len = self.len + alignment;
        let len = len.max(1);
        // Any errors during unmapping/closing are ignored as the only way
        // to report them would be through panicking which is highly discouraged
        // in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
        unsafe {
            let ptr = self.ptr.offset(-(alignment as isize));
            libc::munmap(ptr, len as libc::size_t);
        }
    }
}
// SAFETY: `MmapInner` owns its mapping and holds no interior mutability; the
// raw pointer is only dereferenced through the safe `Mmap`/`MmapMut` wrappers,
// which enforce Rust's usual shared-xor-mutable access rules.
// NOTE(review): this mirrors the upstream memmap2 reasoning — re-verify if any
// interior mutability is ever added to this type.
unsafe impl Sync for MmapInner {}
unsafe impl Send for MmapInner {}
/// Returns the system page size, querying `sysconf(_SC_PAGESIZE)` once and
/// caching the result in a process-wide atomic for subsequent calls.
fn page_size() -> usize {
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);

    // Fast path: a previous call already cached the value.
    let cached = PAGE_SIZE.load(Ordering::Relaxed);
    if cached != 0 {
        return cached;
    }

    // Slow path: ask the OS and publish the answer. A race between threads is
    // benign here — every thread stores the same value.
    let size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
    PAGE_SIZE.store(size, Ordering::Relaxed);
    size
}
/// Returns the size in bytes of the file behind the raw descriptor `file`.
pub fn file_len(file: RawFd) -> io::Result<u64> {
    // SAFETY: `File::from_raw_fd` takes ownership of the descriptor, which we
    // must not close. Wrapping the `File` in `ManuallyDrop` immediately keeps
    // its destructor (and therefore `close(2)`) from ever running.
    let file = unsafe { ManuallyDrop::new(File::from_raw_fd(file)) };
    file.metadata().map(|meta| meta.len())
}

516
third-party/vendor/memmap2/src/windows.rs vendored Normal file
View file

@@ -0,0 +1,516 @@
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use std::fs::File;
use std::mem::ManuallyDrop;
use std::os::raw::c_void;
use std::os::windows::io::{FromRawHandle, RawHandle};
use std::{io, mem, ptr};
// Hand-written Win32 type aliases, matching the definitions in <windows.h>,
// so this file needs no dependency on the `winapi`/`windows-sys` crates.
type BOOL = i32;
type WORD = u16;
type DWORD = u32;
type WCHAR = u16;
type HANDLE = *mut c_void;
type LPHANDLE = *mut HANDLE;
type LPVOID = *mut c_void;
type LPCVOID = *const c_void;
type ULONG_PTR = usize;
type SIZE_T = ULONG_PTR;
type LPCWSTR = *const WCHAR;
type PDWORD = *mut DWORD;
type DWORD_PTR = ULONG_PTR;
type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
type LPSYSTEM_INFO = *mut SYSTEM_INFO;

// Sentinel pseudo-handle used by CreateFileMappingW for pagefile-backed
// (anonymous) mappings.
const INVALID_HANDLE_VALUE: HANDLE = -1isize as HANDLE;

const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002;

// Section (file-mapping object) access rights, from winnt.h.
const STANDARD_RIGHTS_REQUIRED: DWORD = 0x000F0000;
const SECTION_QUERY: DWORD = 0x0001;
const SECTION_MAP_WRITE: DWORD = 0x0002;
const SECTION_MAP_READ: DWORD = 0x0004;
const SECTION_MAP_EXECUTE: DWORD = 0x0008;
const SECTION_EXTEND_SIZE: DWORD = 0x0010;
const SECTION_MAP_EXECUTE_EXPLICIT: DWORD = 0x0020;
const SECTION_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED
    | SECTION_QUERY
    | SECTION_MAP_WRITE
    | SECTION_MAP_READ
    | SECTION_MAP_EXECUTE
    | SECTION_EXTEND_SIZE;

// Page-protection constants accepted by CreateFileMappingW / VirtualProtect.
const PAGE_READONLY: DWORD = 0x02;
const PAGE_READWRITE: DWORD = 0x04;
const PAGE_WRITECOPY: DWORD = 0x08;
const PAGE_EXECUTE_READ: DWORD = 0x20;
const PAGE_EXECUTE_READWRITE: DWORD = 0x40;
const PAGE_EXECUTE_WRITECOPY: DWORD = 0x80;

// View-access flags accepted by MapViewOfFile (aliases of section rights).
const FILE_MAP_WRITE: DWORD = SECTION_MAP_WRITE;
const FILE_MAP_READ: DWORD = SECTION_MAP_READ;
const FILE_MAP_ALL_ACCESS: DWORD = SECTION_ALL_ACCESS;
const FILE_MAP_EXECUTE: DWORD = SECTION_MAP_EXECUTE_EXPLICIT;
const FILE_MAP_COPY: DWORD = 0x00000001;
// Mirror of the Win32 SECURITY_ATTRIBUTES struct; only ever passed as a null
// pointer here, but the layout must match for the extern declarations below.
#[repr(C)]
struct SECURITY_ATTRIBUTES {
    nLength: DWORD,
    lpSecurityDescriptor: LPVOID,
    bInheritHandle: BOOL,
}
// Mirror of the Win32 SYSTEM_INFO struct, filled in by `GetSystemInfo`.
// Only `dwAllocationGranularity` is read here (see `allocation_granularity`).
#[repr(C)]
struct SYSTEM_INFO {
    wProcessorArchitecture: WORD,
    wReserved: WORD,
    dwPageSize: DWORD,
    lpMinimumApplicationAddress: LPVOID,
    lpMaximumApplicationAddress: LPVOID,
    dwActiveProcessorMask: DWORD_PTR,
    dwNumberOfProcessors: DWORD,
    dwProcessorType: DWORD,
    dwAllocationGranularity: DWORD,
    wProcessorLevel: WORD,
    wProcessorRevision: WORD,
}
// Mirror of the Win32 FILETIME struct (a 64-bit timestamp split into two
// DWORDs). NOTE(review): not referenced by any code visible in this file —
// presumably used by code elsewhere in the crate; confirm before removing.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct FILETIME {
    pub dwLowDateTime: DWORD,
    pub dwHighDateTime: DWORD,
}
// Minimal hand-written bindings to the Win32 memory-mapping APIs (kernel32),
// declared directly instead of pulling in a bindings crate. Signatures must
// match the Windows SDK exactly.
extern "system" {
    fn GetCurrentProcess() -> HANDLE;
    fn CloseHandle(hObject: HANDLE) -> BOOL;
    fn DuplicateHandle(
        hSourceProcessHandle: HANDLE,
        hSourceHandle: HANDLE,
        hTargetProcessHandle: HANDLE,
        lpTargetHandle: LPHANDLE,
        dwDesiredAccess: DWORD,
        bInheritHandle: BOOL,
        dwOptions: DWORD,
    ) -> BOOL;
    fn CreateFileMappingW(
        hFile: HANDLE,
        lpFileMappingAttributes: LPSECURITY_ATTRIBUTES,
        flProtect: DWORD,
        dwMaximumSizeHigh: DWORD,
        dwMaximumSizeLow: DWORD,
        lpName: LPCWSTR,
    ) -> HANDLE;
    fn FlushFileBuffers(hFile: HANDLE) -> BOOL;
    fn FlushViewOfFile(lpBaseAddress: LPCVOID, dwNumberOfBytesToFlush: SIZE_T) -> BOOL;
    fn UnmapViewOfFile(lpBaseAddress: LPCVOID) -> BOOL;
    fn MapViewOfFile(
        hFileMappingObject: HANDLE,
        dwDesiredAccess: DWORD,
        dwFileOffsetHigh: DWORD,
        dwFileOffsetLow: DWORD,
        dwNumberOfBytesToMap: SIZE_T,
    ) -> LPVOID;
    fn VirtualProtect(
        lpAddress: LPVOID,
        dwSize: SIZE_T,
        flNewProtect: DWORD,
        lpflOldProtect: PDWORD,
    ) -> BOOL;
    fn GetSystemInfo(lpSystemInfo: LPSYSTEM_INFO);
}
/// Returns a fixed pointer that is valid for `slice::from_raw_parts::<u8>`
/// with `len == 0`. Also serves as the marker value for a zero-length map
/// (compared against in `flush_async`, `virtual_protect`, and `Drop`).
fn empty_slice_ptr() -> *mut c_void {
    let dangling = std::ptr::NonNull::<u8>::dangling();
    dangling.cast::<c_void>().as_ptr()
}
/// Windows implementation of a memory map, wrapping a `MapViewOfFile` view.
pub struct MmapInner {
    // Duplicated file handle kept for `FlushFileBuffers`; `None` for
    // anonymous maps and the zero-length marker map.
    handle: Option<RawHandle>,
    // Pointer to the first *requested* byte (already past alignment padding),
    // or the `empty_slice_ptr()` marker for zero-length maps.
    ptr: *mut c_void,
    // Requested length in bytes.
    len: usize,
    // True for copy-on-write (WRITECOPY) maps; selects the WRITECOPY page
    // protections in `make_exec` / `make_mut`.
    copy: bool,
}
impl MmapInner {
    /// Creates a new `MmapInner`.
    ///
    /// This is a thin wrapper around the `CreateFileMappingW` and `MapViewOfFile` system calls.
    ///
    /// `offset` is rounded down to the allocation granularity (the alignment
    /// `MapViewOfFile` requires); the stored pointer is advanced past the
    /// padding so callers see exactly `len` bytes starting at `offset`.
    pub fn new(
        handle: RawHandle,
        protect: DWORD,
        access: DWORD,
        offset: u64,
        len: usize,
        copy: bool,
    ) -> io::Result<MmapInner> {
        let alignment = offset % allocation_granularity() as u64;
        let aligned_offset = offset - alignment as u64;
        let aligned_len = len + alignment as usize;
        if aligned_len == 0 {
            // `CreateFileMappingW` documents:
            //
            // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-createfilemappingw
            // > An attempt to map a file with a length of 0 (zero) fails with an error code
            // > of ERROR_FILE_INVALID. Applications should test for files with a length of 0
            // > (zero) and reject those files.
            //
            // For such files, don't create a mapping at all and use a marker pointer instead.
            return Ok(MmapInner {
                handle: None,
                ptr: empty_slice_ptr(),
                len: 0,
                copy,
            });
        }
        unsafe {
            // Zero for both size halves sizes the mapping object to the file.
            let mapping = CreateFileMappingW(handle, ptr::null_mut(), protect, 0, 0, ptr::null());
            if mapping.is_null() {
                return Err(io::Error::last_os_error());
            }
            let ptr = MapViewOfFile(
                mapping,
                access,
                // NOTE(review): the split `>> 16 >> 16` presumably comes from C
                // code where a single 32-bit shift would be invalid; on a u64
                // it is equivalent to `>> 32` (the high DWORD of the offset).
                (aligned_offset >> 16 >> 16) as DWORD,
                (aligned_offset & 0xffffffff) as DWORD,
                aligned_len as SIZE_T,
            );
            // The mapped view keeps the section alive; the mapping handle
            // itself is no longer needed.
            CloseHandle(mapping);
            if ptr.is_null() {
                return Err(io::Error::last_os_error());
            }
            // Duplicate the caller's file handle so `flush` can still call
            // `FlushFileBuffers` even after the caller closes the original.
            let mut new_handle = 0 as RawHandle;
            let cur_proc = GetCurrentProcess();
            let ok = DuplicateHandle(
                cur_proc,
                handle,
                cur_proc,
                &mut new_handle,
                0,
                0,
                DUPLICATE_SAME_ACCESS,
            );
            if ok == 0 {
                // Roll back the view before reporting the failure.
                UnmapViewOfFile(ptr);
                return Err(io::Error::last_os_error());
            }
            Ok(MmapInner {
                handle: Some(new_handle),
                ptr: ptr.offset(alignment as isize),
                len: len as usize,
                copy,
            })
        }
    }

    /// Read-only shared view. Probes for the widest protection the handle
    /// supports and creates the map with it, so the protection can later be
    /// upgraded via `make_mut`/`make_exec`; then immediately drops the view
    /// back to read-only.
    pub fn map(
        len: usize,
        handle: RawHandle,
        offset: u64,
        _populate: bool,
    ) -> io::Result<MmapInner> {
        let write = protection_supported(handle, PAGE_READWRITE);
        let exec = protection_supported(handle, PAGE_EXECUTE_READ);
        let mut access = FILE_MAP_READ;
        let protection = match (write, exec) {
            (true, true) => {
                access |= FILE_MAP_WRITE | FILE_MAP_EXECUTE;
                PAGE_EXECUTE_READWRITE
            }
            (true, false) => {
                access |= FILE_MAP_WRITE;
                PAGE_READWRITE
            }
            (false, true) => {
                access |= FILE_MAP_EXECUTE;
                PAGE_EXECUTE_READ
            }
            (false, false) => PAGE_READONLY,
        };
        let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
        if write || exec {
            inner.make_read_only()?;
        }
        Ok(inner)
    }

    /// Read + execute shared view; same widest-protection-then-restrict
    /// strategy as `map`.
    pub fn map_exec(
        len: usize,
        handle: RawHandle,
        offset: u64,
        _populate: bool,
    ) -> io::Result<MmapInner> {
        let write = protection_supported(handle, PAGE_READWRITE);
        let mut access = FILE_MAP_READ | FILE_MAP_EXECUTE;
        let protection = if write {
            access |= FILE_MAP_WRITE;
            PAGE_EXECUTE_READWRITE
        } else {
            PAGE_EXECUTE_READ
        };
        let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
        if write {
            inner.make_exec()?;
        }
        Ok(inner)
    }

    /// Read-write shared view: writes propagate to the underlying file.
    pub fn map_mut(
        len: usize,
        handle: RawHandle,
        offset: u64,
        _populate: bool,
    ) -> io::Result<MmapInner> {
        let exec = protection_supported(handle, PAGE_EXECUTE_READ);
        let mut access = FILE_MAP_READ | FILE_MAP_WRITE;
        let protection = if exec {
            access |= FILE_MAP_EXECUTE;
            PAGE_EXECUTE_READWRITE
        } else {
            PAGE_READWRITE
        };
        let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
        if exec {
            inner.make_mut()?;
        }
        Ok(inner)
    }

    /// Writable copy-on-write view (`FILE_MAP_COPY`): writes stay local and
    /// are never written back to the file.
    pub fn map_copy(
        len: usize,
        handle: RawHandle,
        offset: u64,
        _populate: bool,
    ) -> io::Result<MmapInner> {
        let exec = protection_supported(handle, PAGE_EXECUTE_READWRITE);
        let mut access = FILE_MAP_COPY;
        let protection = if exec {
            access |= FILE_MAP_EXECUTE;
            PAGE_EXECUTE_WRITECOPY
        } else {
            PAGE_WRITECOPY
        };
        let mut inner = MmapInner::new(handle, protection, access, offset, len, true)?;
        if exec {
            inner.make_mut()?;
        }
        Ok(inner)
    }

    /// Read-only copy-on-write view, restricted to read-only after creation.
    pub fn map_copy_read_only(
        len: usize,
        handle: RawHandle,
        offset: u64,
        _populate: bool,
    ) -> io::Result<MmapInner> {
        let write = protection_supported(handle, PAGE_READWRITE);
        let exec = protection_supported(handle, PAGE_EXECUTE_READ);
        let mut access = FILE_MAP_COPY;
        let protection = if exec {
            access |= FILE_MAP_EXECUTE;
            PAGE_EXECUTE_WRITECOPY
        } else {
            PAGE_WRITECOPY
        };
        let mut inner = MmapInner::new(handle, protection, access, offset, len, true)?;
        if write || exec {
            inner.make_read_only()?;
        }
        Ok(inner)
    }

    /// Anonymous (pagefile-backed) read-write map; `stack`/`populate` are
    /// accepted for signature parity with the unix backend but have no
    /// Windows equivalent here.
    pub fn map_anon(len: usize, _stack: bool, _populate: bool) -> io::Result<MmapInner> {
        // Ensure a non-zero length for the underlying mapping
        let mapped_len = len.max(1);
        unsafe {
            // Create a mapping and view with maximum access permissions, then use `VirtualProtect`
            // to set the actual `Protection`. This way, we can set more permissive protection later
            // on.
            // Also see https://msdn.microsoft.com/en-us/library/windows/desktop/aa366537.aspx
            let mapping = CreateFileMappingW(
                INVALID_HANDLE_VALUE,
                ptr::null_mut(),
                PAGE_EXECUTE_READWRITE,
                (mapped_len >> 16 >> 16) as DWORD,
                (mapped_len & 0xffffffff) as DWORD,
                ptr::null(),
            );
            if mapping.is_null() {
                return Err(io::Error::last_os_error());
            }
            let access = FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE;
            let ptr = MapViewOfFile(mapping, access, 0, 0, mapped_len as SIZE_T);
            // The view keeps the section alive; the handle can be closed now.
            CloseHandle(mapping);
            if ptr.is_null() {
                return Err(io::Error::last_os_error());
            }
            let mut old = 0;
            let result = VirtualProtect(ptr, mapped_len as SIZE_T, PAGE_READWRITE, &mut old);
            if result != 0 {
                Ok(MmapInner {
                    handle: None,
                    ptr,
                    len: len as usize,
                    copy: false,
                })
            } else {
                Err(io::Error::last_os_error())
            }
        }
    }

    /// Synchronous flush: schedules the dirty pages via `flush_async`, then
    /// forces them to disk with `FlushFileBuffers` (when a file handle exists).
    pub fn flush(&self, offset: usize, len: usize) -> io::Result<()> {
        self.flush_async(offset, len)?;
        if let Some(handle) = self.handle {
            let ok = unsafe { FlushFileBuffers(handle) };
            if ok == 0 {
                return Err(io::Error::last_os_error());
            }
        }
        Ok(())
    }

    /// Asynchronous flush via `FlushViewOfFile`; a no-op for the zero-length
    /// marker map.
    pub fn flush_async(&self, offset: usize, len: usize) -> io::Result<()> {
        if self.ptr == empty_slice_ptr() {
            return Ok(());
        }
        let result = unsafe { FlushViewOfFile(self.ptr.add(offset), len as SIZE_T) };
        if result != 0 {
            Ok(())
        } else {
            Err(io::Error::last_os_error())
        }
    }

    /// Changes the page protection of the whole view via `VirtualProtect`.
    fn virtual_protect(&mut self, protect: DWORD) -> io::Result<()> {
        // Nothing to protect for the zero-length marker map.
        if self.ptr == empty_slice_ptr() {
            return Ok(());
        }
        unsafe {
            // Rewind to the granularity-aligned base of the view and cover
            // the alignment padding too (mirrors `new`).
            let alignment = self.ptr as usize % allocation_granularity();
            let ptr = self.ptr.offset(-(alignment as isize));
            let aligned_len = self.len as SIZE_T + alignment as SIZE_T;
            let mut old = 0;
            let result = VirtualProtect(ptr, aligned_len, protect, &mut old);
            if result != 0 {
                Ok(())
            } else {
                Err(io::Error::last_os_error())
            }
        }
    }

    /// Drops the view to read-only protection.
    pub fn make_read_only(&mut self) -> io::Result<()> {
        self.virtual_protect(PAGE_READONLY)
    }

    /// Makes the view executable, using the WRITECOPY variant for
    /// copy-on-write maps.
    pub fn make_exec(&mut self) -> io::Result<()> {
        if self.copy {
            self.virtual_protect(PAGE_EXECUTE_WRITECOPY)
        } else {
            self.virtual_protect(PAGE_EXECUTE_READ)
        }
    }

    /// Makes the view writable, using the WRITECOPY variant for
    /// copy-on-write maps.
    pub fn make_mut(&mut self) -> io::Result<()> {
        if self.copy {
            self.virtual_protect(PAGE_WRITECOPY)
        } else {
            self.virtual_protect(PAGE_READWRITE)
        }
    }

    /// Raw const pointer to the first requested byte.
    #[inline]
    pub fn ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }

    /// Raw mutable pointer to the first requested byte.
    #[inline]
    pub fn mut_ptr(&mut self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Requested length of the mapping in bytes.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }
}
impl Drop for MmapInner {
    fn drop(&mut self) {
        // The zero-length marker map never created a view or handle.
        if self.ptr == empty_slice_ptr() {
            return;
        }
        // Rewind to the granularity-aligned base that `MapViewOfFile` returned
        // (mirrors the padding applied in `MmapInner::new`).
        let alignment = self.ptr as usize % allocation_granularity();
        // Any errors during unmapping/closing are ignored as the only way
        // to report them would be through panicking which is highly discouraged
        // in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
        unsafe {
            let ptr = self.ptr.offset(-(alignment as isize));
            UnmapViewOfFile(ptr);
            if let Some(handle) = self.handle {
                CloseHandle(handle);
            }
        }
    }
}
// SAFETY: `MmapInner` owns its view/handle and exposes no interior mutability;
// the raw pointer is only dereferenced through the safe wrappers, which
// enforce Rust's shared-xor-mutable access rules.
// NOTE(review): mirrors upstream memmap2's reasoning — re-verify if interior
// mutability is ever added to this type.
unsafe impl Sync for MmapInner {}
unsafe impl Send for MmapInner {}
/// Probes whether `handle` can be mapped with the given page `protection` by
/// creating — and immediately closing — a throwaway file-mapping object.
fn protection_supported(handle: RawHandle, protection: DWORD) -> bool {
    unsafe {
        let mapping = CreateFileMappingW(handle, ptr::null_mut(), protection, 0, 0, ptr::null());
        if mapping.is_null() {
            false
        } else {
            // Creation succeeded: the protection is supported; discard the probe.
            CloseHandle(mapping);
            true
        }
    }
}
fn allocation_granularity() -> usize {
unsafe {
let mut info = mem::zeroed();
GetSystemInfo(&mut info);
info.dwAllocationGranularity as usize
}
}
/// Returns the size in bytes of the file behind the raw `handle`.
pub fn file_len(handle: RawHandle) -> io::Result<u64> {
    // SAFETY: `File::from_raw_handle` assumes ownership of the handle, which
    // we must not close. Wrapping the `File` in `ManuallyDrop` immediately
    // keeps its destructor (and thus `CloseHandle`) from ever running.
    let file = unsafe { ManuallyDrop::new(File::from_raw_handle(handle)) };
    file.metadata().map(|meta| meta.len())
}