Vendor things

This commit is contained in:
John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions

View file

@ -0,0 +1 @@
{"files":{"Cargo.toml":"973fd894e8985b9b1fda4bf34f0e71af54d69a61fe6d8b7f1bbe443eb5863bcc","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"cabc15d1a4d23868718052a4e5eaa1fd3f493ef6fb16c499210a48588651a805","build.rs":"4a9c4ac3759572e17de312a9d3f4ced3b6fd3c71811729e5a8d06bfbd1ac8f82","callbacks.rs":"aa3d8789241e965eb07a0103c5fd7b81091431458d83c2eaa9611c3d54e909f2","clang.rs":"a38d153516c6685b46723010793b2f4e8b16468f3cd3296781dea6e485cd66da","codegen/bitfield_unit.rs":"fddeaeab5859f4e82081865595b7705f5c0774d997df95fa5c655b81b9cae125","codegen/bitfield_unit_tests.rs":"9df86490de5e9d66ccea583dcb686dd440375dc1a3c3cf89a89d5de3883bf28a","codegen/dyngen.rs":"dd50fa3b4eaa5f4e479d4755cf109961e1fcd99fb32c93316da1f8eab80f37d9","codegen/error.rs":"5e308b8c54b68511fc8ea2ad15ddac510172c4ff460a80a265336440b0c9653d","codegen/helpers.rs":"5f24007a09e50db7bd2b49b07100cfed649c7b22232558e28c99c759d5d14152","codegen/impl_debug.rs":"71d8e28873ba2de466f2637a824746963702f0511728037d72ee5670c51194cb","codegen/impl_partialeq.rs":"f4599e32c66179ae515a6518a3e94b686689cf59f7dd9ab618c3fb69f17d2c77","codegen/mod.rs":"b35b9371266633e8d813aa8bc4c44ee49cacf1b6e740973db252481c0d3799c4","codegen/postprocessing/merge_extern_blocks.rs":"be5c5ff6d3d3d4e940814c4dc77f8d687aa6b053dcfbd2185c09616ba9022bf2","codegen/postprocessing/mod.rs":"d1e8c2864d5194a4cb2f6731c0ef582a55b363df9c4f888c26942ff37f728e1c","codegen/postprocessing/sort_semantically.rs":"3071dd509f2e5d3dfd99cafa6ee19bbacb9fec1c61a0b3f6e284a75c1f2c3db6","codegen/struct_layout.rs":"7dfc814d3c914a0c53d8ed031baf543dab1def5959e8ab85220cad69a506383a","deps.rs":"5ee2332fdb10325f3b0a0c6d9ba94e13eb631ef39e955fa958afc3625bdb5448","extra_assertions.rs":"494534bd4f18b80d89b180c8a93733e6617edcf7deac413e9a73fd6e7bc9ced7","features.rs":"af20bd617cce27f6716ab3d61a1c8ddfaa46227f4a0d435b08a19d5f277cf8ba","ir/analysis/derive.rs":"ec514585eb40f0e3306bf3302aec0152a2e95e8dce13a67c36b5f00c0cbb76ef","ir/analysis/has_destructor.rs":
"7a82f01e7e0595a31b56f7c398fa3019b3fe9100a2a73b56768f7e6943dcc3ce","ir/analysis/has_float.rs":"58ea1e38a59ef208400fd65d426cb5b288949df2d383b3a194fa01b99d2a87fa","ir/analysis/has_type_param_in_array.rs":"d1b9eb119dc14f662eb9bd1394c859f485479e4912589709cdd33f6752094e22","ir/analysis/has_vtable.rs":"368cf30fbe3fab7190fab48718b948caac5da8c9e797b709488716b919315636","ir/analysis/mod.rs":"0c10d8eeb26d7e6f4ce18e9dfb74ea1f9deff5cd350298aca3dc1041b17c20c4","ir/analysis/sizedness.rs":"944443d6aab35d2dd80e4f5e59176ac1e1c463ba2f0eb25d33f1d95dfac1a6d0","ir/analysis/template_params.rs":"a2d2e247c2f51cd90e83f11bce0305c2e498232d015f88192b44e8522e7fd8b1","ir/annotations.rs":"f79de09803a3f3ccb33e366a10a707da98cd00a56ba18312ea927d6c977220a4","ir/comment.rs":"5dcfab0095d967daad9b2927093fce3786b1a2146171580afbf526ba56855e36","ir/comp.rs":"8a96d6760c988d35e07462d66975218fbf3121902d313d71c553f2b473d37bc8","ir/context.rs":"ee5606460afff17c2fcfbd72779edb6038b6ba390e8b7a5b2ccb694acb7036cb","ir/derive.rs":"e5581852eec87918901a129284b4965aefc8a19394187a8095779a084f28fabe","ir/dot.rs":"2d79d698e6ac59ce032840e62ff11103abed1d5e9e700cf383b492333eeebe1f","ir/enum_ty.rs":"13c1de2d0668f811ea1f3353c77c892e475b2e6f5935f6a0b7f711c0ffa64c21","ir/function.rs":"36d9967cf9cd28352b682747376c5e4ef5f679eff9907b670edd2c670058bbce","ir/int.rs":"68a86182743ec338d58e42203364dc7c8970cb7ec3550433ca92f0c9489b4442","ir/item.rs":"6098bceb8d98fe72e1442ea71d36a5eabfdb153aa767527d235b654f912a9333","ir/item_kind.rs":"7666a1ff1b8260978b790a08b4139ab56b5c65714a5652bbcec7faa7443adc36","ir/layout.rs":"61a0e4250ceab889606973f930f4d692837a13a69ae2579738ff09843fed3d65","ir/mod.rs":"713cd537434567003197a123cbae679602c715e976d22f7b23dafd0826ea4c70","ir/module.rs":"7cae5561bcf84a5c3b1ee8f1c3336a33b7f44f0d5ffe885fb108289983fe763e","ir/objc.rs":"dd394c1db6546cbe5111ce5cd2f211f9839aba81c5e7228c2a68fba386bc259f","ir/template.rs":"3bb3e7f6ec28eff73c2032922d30b93d70da117b848e9cb02bdf6c9a74294f7f","ir/traversal.rs":"0c37a0898801ad39bffc8dd
dd1ee8baa61bb7cf4f3fdc25c8fdd56b6c96ada65","ir/ty.rs":"d910af9f9cc67513914f71553ea2f493562ea8cc20117cb17b7658fe5d670caf","ir/var.rs":"f3ffbfc26de1dc6e23fa46a156f98aec1c6b339dbd7627934a4ec57332a25650","lib.rs":"0edba5c8926ff08c6ae8ca5c64cb6ed89acf0d358f3bb6fa0eb0fc4f0f055f01","log_stubs.rs":"9f974e041e35c8c7e29985d27ae5cd0858d68f8676d1dc005c6388d7d011707f","parse.rs":"4ffc54415eadb622ee488603862788c78361ef2c889de25259441a340c2a010f","regex_set.rs":"ead517110d8ef80e222326d000a4f02b9bd2ce99b58a514ee90b89f0042f6a81","time.rs":"8efe317e7c6b5ba8e0865ce7b49ca775ee8a02590f4241ef62f647fa3c22b68e"},"package":"36d860121800b2a9a94f9b5604b332d5cffb234ce17609ea479d723dbc9d3885"}

115
third-party/vendor/bindgen/Cargo.toml vendored Normal file
View file

@ -0,0 +1,115 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.57.0"
name = "bindgen"
version = "0.63.0"
authors = [
"Jyun-Yan You <jyyou.tw@gmail.com>",
"Emilio Cobos Álvarez <emilio@crisal.io>",
"Nick Fitzgerald <fitzgen@gmail.com>",
"The Servo project developers",
]
build = "build.rs"
description = "Automatically generates Rust FFI bindings to C and C++ libraries."
homepage = "https://rust-lang.github.io/rust-bindgen/"
documentation = "https://docs.rs/bindgen"
readme = "README.md"
keywords = [
"bindings",
"ffi",
"code-generation",
]
categories = [
"external-ffi-bindings",
"development-tools::ffi",
]
license = "BSD-3-Clause"
repository = "https://github.com/rust-lang/rust-bindgen"
[lib]
name = "bindgen"
path = "./lib.rs"
[dependencies.bitflags]
version = "1.0.3"
[dependencies.cexpr]
version = "0.6"
[dependencies.clang-sys]
version = "1"
features = ["clang_6_0"]
[dependencies.lazy_static]
version = "1"
[dependencies.lazycell]
version = "1"
[dependencies.log]
version = "0.4"
optional = true
[dependencies.peeking_take_while]
version = "0.1.2"
[dependencies.proc-macro2]
version = "1"
default-features = false
[dependencies.quote]
version = "1"
default-features = false
[dependencies.regex]
version = "1.5"
features = [
"std",
"unicode",
]
default-features = false
[dependencies.rustc-hash]
version = "1.0.1"
[dependencies.shlex]
version = "1"
[dependencies.syn]
version = "1.0.99"
features = [
"full",
"extra-traits",
"visit-mut",
]
[dependencies.which]
version = "4.2.1"
optional = true
default-features = false
[features]
default = [
"logging",
"runtime",
"which-rustfmt",
]
logging = ["log"]
runtime = ["clang-sys/runtime"]
static = ["clang-sys/static"]
testing_only_docs = []
testing_only_extra_assertions = []
testing_only_libclang_5 = []
testing_only_libclang_9 = []
which-rustfmt = ["which"]

29
third-party/vendor/bindgen/LICENSE vendored Normal file
View file

@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2013, Jyun-Yan You
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

85
third-party/vendor/bindgen/README.md vendored Normal file
View file

@ -0,0 +1,85 @@
[![crates.io](https://img.shields.io/crates/v/bindgen.svg)](https://crates.io/crates/bindgen)
[![docs.rs](https://docs.rs/bindgen/badge.svg)](https://docs.rs/bindgen/)
# `bindgen`
**`bindgen` automatically generates Rust FFI bindings to C (and some C++) libraries.**
For example, given the C header `doggo.h`:
```c
typedef struct Doggo {
int many;
char wow;
} Doggo;
void eleven_out_of_ten_majestic_af(Doggo* pupper);
```
`bindgen` produces Rust FFI code allowing you to call into the `doggo` library's
functions and use its types:
```rust
/* automatically generated by rust-bindgen 0.99.9 */
#[repr(C)]
pub struct Doggo {
pub many: ::std::os::raw::c_int,
pub wow: ::std::os::raw::c_char,
}
extern "C" {
pub fn eleven_out_of_ten_majestic_af(pupper: *mut Doggo);
}
```
## Users Guide
[📚 Read the `bindgen` users guide here! 📚](https://rust-lang.github.io/rust-bindgen)
## MSRV
The minimum supported Rust version is **1.57.0**.
No MSRV bump policy has been established yet, so MSRV may increase in any release.
The MSRV is the minimum Rust version that can be used to *compile* `bindgen`. However, `bindgen` can generate bindings that are compatible with Rust versions below the current MSRV.
## API Reference
[API reference documentation is on docs.rs](https://docs.rs/bindgen)
## Environment Variables
In addition to the [library API](https://docs.rs/bindgen) and [executable command-line API][bindgen-cmdline],
`bindgen` can be controlled through environment variables.
End-users should set these environment variables to modify `bindgen`'s behavior without modifying the source code of direct consumers of `bindgen`.
- `BINDGEN_EXTRA_CLANG_ARGS`: extra arguments to pass to `clang`
- Arguments are whitespace-separated
- Use shell-style quoting to pass through whitespace
- Examples:
- Specify alternate sysroot: `--sysroot=/path/to/sysroot`
- Add include search path with spaces: `-I"/path/with spaces"`
- `BINDGEN_EXTRA_CLANG_ARGS_<TARGET>`: similar to `BINDGEN_EXTRA_CLANG_ARGS`,
but used to set per-target arguments to pass to clang. Useful to set system include
directories in a target-specific way in cross-compilation environments with multiple targets.
Has precedence over `BINDGEN_EXTRA_CLANG_ARGS`.
Additionally, `bindgen` uses `libclang` to parse C and C++ header files.
To modify how `bindgen` searches for `libclang`, see the [`clang-sys` documentation][clang-sys-env].
For more details on how `bindgen` uses `libclang`, see the [`bindgen` users guide][bindgen-book-clang].
## Releases
We don't follow a specific release calendar, but if you need a release please
file an issue requesting that (ping `@emilio` for increased effectiveness).
## Contributing
[See `CONTRIBUTING.md` for hacking on `bindgen`!](./CONTRIBUTING.md)
[bindgen-cmdline]: https://rust-lang.github.io/rust-bindgen/command-line-usage.html
[clang-sys-env]: https://github.com/KyleMayes/clang-sys#environment-variables
[bindgen-book-clang]: https://rust-lang.github.io/rust-bindgen/requirements.html#clang

29
third-party/vendor/bindgen/build.rs vendored Normal file
View file

@ -0,0 +1,29 @@
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
/// Build script: records the host's target triple into
/// `$OUT_DIR/host-target.txt` (consumed elsewhere in the crate —
/// NOTE(review): the consumer is not visible in this file) and forwards the
/// rebuild triggers that clang-sys cares about.
fn main() {
    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    // Read TARGET once instead of unwrapping the env var three times.
    let target = env::var("TARGET").unwrap();

    // `out_dir` is already a `PathBuf`; join it directly rather than
    // re-wrapping it in `Path::new`.
    let mut dst = File::create(out_dir.join("host-target.txt")).unwrap();
    dst.write_all(target.as_bytes()).unwrap();

    // On behalf of clang_sys, rebuild ourselves if important configuration
    // variables change, to ensure that bindings get rebuilt if the
    // underlying libclang changes.
    println!("cargo:rerun-if-env-changed=LLVM_CONFIG_PATH");
    println!("cargo:rerun-if-env-changed=LIBCLANG_PATH");
    println!("cargo:rerun-if-env-changed=LIBCLANG_STATIC_PATH");
    println!("cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS");
    // Both the raw triple and its `_`-separated form are honored as
    // per-target override suffixes.
    println!("cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{}", target);
    println!(
        "cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{}",
        target.replace('-', "_")
    );
}

124
third-party/vendor/bindgen/callbacks.rs vendored Normal file
View file

@ -0,0 +1,124 @@
//! A public API for more fine-grained customization of bindgen behavior.
pub use crate::ir::analysis::DeriveTrait;
pub use crate::ir::derive::CanDerive as ImplementsTrait;
pub use crate::ir::enum_ty::{EnumVariantCustomBehavior, EnumVariantValue};
pub use crate::ir::int::IntKind;
use std::fmt;
/// Controls whether a given macro is parsed or skipped entirely.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum MacroParsingBehavior {
    /// Skip this macro completely: generate no code for it, nor for anything
    /// that depends on it.
    Ignore,
    /// Fall through to bindgen's normal macro handling.
    Default,
}

impl Default for MacroParsingBehavior {
    fn default() -> Self {
        Self::Default
    }
}
/// Hooks that let library users customize how particular constructs are
/// handled while bindings are generated. Every method has a no-op default,
/// so implementors only override what they need.
pub trait ParseCallbacks: fmt::Debug {
    /// Called for every macro identified; decides whether it gets parsed.
    fn will_parse_macro(&self, _name: &str) -> MacroParsingBehavior {
        MacroParsingBehavior::Default
    }

    /// Called for every function; returning `Some` overrides the name that
    /// becomes visible in the generated bindings.
    fn generated_name_override(&self, _function_name: &str) -> Option<String> {
        None
    }

    /// Picks the integer kind for an integer macro with the given name and
    /// value, or `None` to let bindgen choose its default.
    fn int_macro(&self, _name: &str, _value: i64) -> Option<IntKind> {
        None
    }

    /// Called for every string macro. Purely observational: it cannot change
    /// how the macro is treated, but the value may drive extra code or
    /// configuration on the callback side.
    fn str_macro(&self, _name: &str, _value: &[u8]) {}

    /// Called for every function-like macro. Observational only, like
    /// [`ParseCallbacks::str_macro`].
    ///
    /// `_name` carries the macro's name together with its parenthesized
    /// argument list; `_value` is the macro's expansion as a sequence of
    /// tokens.
    fn func_macro(&self, _name: &str, _value: &[&[u8]]) {}

    /// Decides whether the named enum variant, with the given value, should
    /// be forced into a custom behavior (e.g. emitted as a constant).
    fn enum_variant_behavior(
        &self,
        _enum_name: Option<&str>,
        _original_variant_name: &str,
        _variant_value: EnumVariantValue,
    ) -> Option<EnumVariantCustomBehavior> {
        None
    }

    /// Returning `Some` renames the enum variant `_original_variant_name`.
    fn enum_variant_name(
        &self,
        _enum_name: Option<&str>,
        _original_variant_name: &str,
        _variant_value: EnumVariantValue,
    ) -> Option<String> {
        None
    }

    /// Returning `Some` renames the item `_original_item_name`.
    fn item_name(&self, _original_item_name: &str) -> Option<String> {
        None
    }

    /// Called on every file inclusion with the included file's full path.
    fn include_file(&self, _filename: &str) {}

    /// Reports whether a particular blocklisted type implements a trait, so
    /// that traits on other types containing it can still be handled.
    ///
    /// * `None`: use the default behavior
    /// * `Some(ImplementsTrait::Yes)`: `_name` implements `_derive_trait`
    /// * `Some(ImplementsTrait::Manually)`: any type including `_name` can't
    ///   derive `_derive_trait` but can implement it manually
    /// * `Some(ImplementsTrait::No)`: `_name` doesn't implement
    ///   `_derive_trait`
    fn blocklisted_type_implements_trait(
        &self,
        _name: &str,
        _derive_trait: DeriveTrait,
    ) -> Option<ImplementsTrait> {
        None
    }

    /// Extra custom derive attributes for the type described by `_info`.
    /// Return an empty vector when nothing should be added.
    fn add_derives(&self, _info: &DeriveInfo<'_>) -> Vec<String> {
        Vec::new()
    }

    /// Transforms a source code comment; `None` keeps default handling.
    fn process_comment(&self, _comment: &str) -> Option<String> {
        None
    }
}
/// Relevant information about a type to which new derive attributes will be added using
/// [`ParseCallbacks::add_derives`].
///
/// Marked `#[non_exhaustive]` so new fields can be added without breaking
/// downstream pattern matches or struct literals.
#[non_exhaustive]
pub struct DeriveInfo<'a> {
    /// The name of the type.
    pub name: &'a str,
}

2224
third-party/vendor/bindgen/clang.rs vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,102 @@
/// Backing storage for a run of C bit-fields, addressed bit by bit.
///
/// `Storage` is the raw byte container (the bit-addressed methods require it
/// to expose a `[u8]` view via `AsRef`/`AsMut`).
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct __BindgenBitfieldUnit<Storage> {
    storage: Storage,
}

impl<Storage> __BindgenBitfieldUnit<Storage> {
    /// Wraps `storage` without inspecting or modifying it.
    #[inline]
    pub const fn new(storage: Storage) -> Self {
        Self { storage }
    }
}

impl<Storage> __BindgenBitfieldUnit<Storage>
where
    Storage: AsRef<[u8]> + AsMut<[u8]>,
{
    /// Maps a logical bit index to its physical position inside the byte,
    /// mirroring the position on big-endian targets.
    #[inline]
    fn bit_pos_in_byte(index: usize) -> usize {
        if cfg!(target_endian = "big") {
            7 - (index % 8)
        } else {
            index % 8
        }
    }

    /// Reads the single bit at `index` (counted from the start of storage).
    #[inline]
    pub fn get_bit(&self, index: usize) -> bool {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let byte = self.storage.as_ref()[index / 8];
        (byte >> Self::bit_pos_in_byte(index)) & 1 != 0
    }

    /// Writes `val` into the single bit at `index`.
    #[inline]
    pub fn set_bit(&mut self, index: usize, val: bool) {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let mask = 1u8 << Self::bit_pos_in_byte(index);
        let byte = &mut self.storage.as_mut()[index / 8];
        if val {
            *byte |= mask;
        } else {
            *byte &= !mask;
        }
    }

    /// Reads `bit_width` bits starting at `bit_offset`, returned in the low
    /// bits of a `u64`.
    #[inline]
    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                self.storage.as_ref().len()
        );
        let width = bit_width as usize;
        (0..width).fold(0u64, |acc, i| {
            if !self.get_bit(bit_offset + i) {
                return acc;
            }
            // Big-endian targets mirror the value's bit order.
            let pos = if cfg!(target_endian = "big") {
                width - 1 - i
            } else {
                i
            };
            acc | (1u64 << pos)
        })
    }

    /// Writes the low `bit_width` bits of `val` starting at `bit_offset`.
    #[inline]
    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                self.storage.as_ref().len()
        );
        let width = bit_width as usize;
        for i in 0..width {
            let source_bit = val & (1u64 << i) != 0;
            // Big-endian targets mirror the value's bit order.
            let pos = if cfg!(target_endian = "big") {
                width - 1 - i
            } else {
                i
            };
            self.set_bit(bit_offset + pos, source_bit);
        }
    }
}

View file

@ -0,0 +1,260 @@
//! Tests for `__BindgenBitfieldUnit`.
//!
//! Note that bit-fields are allocated right to left (least to most significant
//! bits).
//!
//! From the x86 PS ABI:
//!
//! ```c
//! struct {
//! int j : 5;
//! int k : 6;
//! int m : 7;
//! };
//! ```
//!
//! ```ignore
//! +------------------------------------------------------------+
//! | | | | |
//! | padding | m | k | j |
//! |31 18|17 11|10 5|4 0|
//! +------------------------------------------------------------+
//! ```
use super::bitfield_unit::__BindgenBitfieldUnit;
/// Bits read out of a two-byte unit match the literal patterns,
/// least-significant bit first within each byte.
#[test]
fn bitfield_unit_get_bit() {
    let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]);
    let bits: Vec<bool> = (0..16).map(|i| unit.get_bit(i)).collect();
    println!();
    println!("bits = {:?}", bits);
    assert_eq!(
        bits,
        &[
            // 0b10011101
            true, false, true, true, true, false, false, true,
            // 0b00011101
            true, false, true, true, true, false, false, false
        ]
    );
}
/// Setting and clearing every third bit round-trips through `get_bit`.
#[test]
fn bitfield_unit_set_bit() {
    // Start from all zeros and raise every third bit.
    let mut unit =
        __BindgenBitfieldUnit::<[u8; 2]>::new([0b00000000, 0b00000000]);
    (0..16).filter(|i| i % 3 == 0).for_each(|i| unit.set_bit(i, true));
    for i in 0..16 {
        assert_eq!(unit.get_bit(i), i % 3 == 0);
    }

    // Start from all ones and clear every third bit.
    let mut unit =
        __BindgenBitfieldUnit::<[u8; 2]>::new([0b11111111, 0b11111111]);
    (0..16).filter(|i| i % 3 == 0).for_each(|i| unit.set_bit(i, false));
    for i in 0..16 {
        assert_eq!(unit.get_bit(i), i % 3 != 0);
    }
}
/// Expands a table of `With <storage>, then get(<start>, <len>) is
/// <expected>;` rows into a single `bitfield_unit_get` test. Each row builds
/// a fresh unit over `<storage>` and asserts that reading `<len>` bits
/// starting at bit `<start>` yields `<expected>`.
macro_rules! bitfield_unit_get {
    (
        $(
            With $storage:expr , then get($start:expr, $len:expr) is $expected:expr;
        )*
    ) => {
        #[test]
        fn bitfield_unit_get() {
            // Each row expands to its own scoped block so bindings don't
            // collide between cases.
            $({
                let expected = $expected;
                let unit = __BindgenBitfieldUnit::<_>::new($storage);
                let actual = unit.get($start, $len);
                println!();
                println!("expected = {:064b}", expected);
                println!("actual = {:064b}", actual);
                assert_eq!(expected, actual);
            })*
        }
    }
}

bitfield_unit_get! {
    // Let's just exhaustively test getting the bits from a single byte, since
    // there are few enough combinations...
    With [0b11100010], then get(0, 1) is 0;
    With [0b11100010], then get(1, 1) is 1;
    With [0b11100010], then get(2, 1) is 0;
    With [0b11100010], then get(3, 1) is 0;
    With [0b11100010], then get(4, 1) is 0;
    With [0b11100010], then get(5, 1) is 1;
    With [0b11100010], then get(6, 1) is 1;
    With [0b11100010], then get(7, 1) is 1;
    With [0b11100010], then get(0, 2) is 0b10;
    With [0b11100010], then get(1, 2) is 0b01;
    With [0b11100010], then get(2, 2) is 0b00;
    With [0b11100010], then get(3, 2) is 0b00;
    With [0b11100010], then get(4, 2) is 0b10;
    With [0b11100010], then get(5, 2) is 0b11;
    With [0b11100010], then get(6, 2) is 0b11;
    With [0b11100010], then get(0, 3) is 0b010;
    With [0b11100010], then get(1, 3) is 0b001;
    With [0b11100010], then get(2, 3) is 0b000;
    With [0b11100010], then get(3, 3) is 0b100;
    With [0b11100010], then get(4, 3) is 0b110;
    With [0b11100010], then get(5, 3) is 0b111;
    With [0b11100010], then get(0, 4) is 0b0010;
    With [0b11100010], then get(1, 4) is 0b0001;
    With [0b11100010], then get(2, 4) is 0b1000;
    With [0b11100010], then get(3, 4) is 0b1100;
    With [0b11100010], then get(4, 4) is 0b1110;
    With [0b11100010], then get(0, 5) is 0b00010;
    With [0b11100010], then get(1, 5) is 0b10001;
    With [0b11100010], then get(2, 5) is 0b11000;
    With [0b11100010], then get(3, 5) is 0b11100;
    With [0b11100010], then get(0, 6) is 0b100010;
    With [0b11100010], then get(1, 6) is 0b110001;
    With [0b11100010], then get(2, 6) is 0b111000;
    With [0b11100010], then get(0, 7) is 0b1100010;
    With [0b11100010], then get(1, 7) is 0b1110001;
    With [0b11100010], then get(0, 8) is 0b11100010;
    // OK. Now let's test getting bits from across byte boundaries.
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(0, 16) is 0b1111111101010101;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(1, 16) is 0b0111111110101010;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(2, 16) is 0b0011111111010101;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(3, 16) is 0b0001111111101010;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(4, 16) is 0b0000111111110101;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(5, 16) is 0b0000011111111010;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(6, 16) is 0b0000001111111101;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(7, 16) is 0b0000000111111110;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(8, 16) is 0b0000000011111111;
}
/// Expands a table of `set(<start>, <len>, <val>) is <expected>;` rows into
/// a single `bitfield_unit_set` test. Each row writes `<val>` into a zeroed
/// four-byte unit and asserts the whole unit then reads back as `<expected>`.
macro_rules! bitfield_unit_set {
    (
        $(
            set($start:expr, $len:expr, $val:expr) is $expected:expr;
        )*
    ) => {
        #[test]
        fn bitfield_unit_set() {
            $(
                let mut unit = __BindgenBitfieldUnit::<[u8; 4]>::new([0, 0, 0, 0]);
                unit.set($start, $len, $val);
                let actual = unit.get(0, 32);
                println!();
                // NOTE(review): the format string below is missing its
                // closing `)`; harmless, since this is debug output only.
                println!("set({}, {}, {:032b}", $start, $len, $val);
                println!("expected = {:064b}", $expected);
                println!("actual = {:064b}", actual);
                assert_eq!($expected, actual);
            )*
        }
    }
}

bitfield_unit_set! {
    // Once again, let's exhaustively test single byte combinations.
    set(0, 1, 0b11111111) is 0b00000001;
    set(1, 1, 0b11111111) is 0b00000010;
    set(2, 1, 0b11111111) is 0b00000100;
    set(3, 1, 0b11111111) is 0b00001000;
    set(4, 1, 0b11111111) is 0b00010000;
    set(5, 1, 0b11111111) is 0b00100000;
    set(6, 1, 0b11111111) is 0b01000000;
    set(7, 1, 0b11111111) is 0b10000000;
    set(0, 2, 0b11111111) is 0b00000011;
    set(1, 2, 0b11111111) is 0b00000110;
    set(2, 2, 0b11111111) is 0b00001100;
    set(3, 2, 0b11111111) is 0b00011000;
    set(4, 2, 0b11111111) is 0b00110000;
    set(5, 2, 0b11111111) is 0b01100000;
    set(6, 2, 0b11111111) is 0b11000000;
    set(0, 3, 0b11111111) is 0b00000111;
    set(1, 3, 0b11111111) is 0b00001110;
    set(2, 3, 0b11111111) is 0b00011100;
    set(3, 3, 0b11111111) is 0b00111000;
    set(4, 3, 0b11111111) is 0b01110000;
    set(5, 3, 0b11111111) is 0b11100000;
    set(0, 4, 0b11111111) is 0b00001111;
    set(1, 4, 0b11111111) is 0b00011110;
    set(2, 4, 0b11111111) is 0b00111100;
    set(3, 4, 0b11111111) is 0b01111000;
    set(4, 4, 0b11111111) is 0b11110000;
    set(0, 5, 0b11111111) is 0b00011111;
    set(1, 5, 0b11111111) is 0b00111110;
    set(2, 5, 0b11111111) is 0b01111100;
    set(3, 5, 0b11111111) is 0b11111000;
    set(0, 6, 0b11111111) is 0b00111111;
    set(1, 6, 0b11111111) is 0b01111110;
    set(2, 6, 0b11111111) is 0b11111100;
    set(0, 7, 0b11111111) is 0b01111111;
    set(1, 7, 0b11111111) is 0b11111110;
    set(0, 8, 0b11111111) is 0b11111111;
    // And, now let's cross byte boundaries.
    set(0, 16, 0b1111111111111111) is 0b00000000000000001111111111111111;
    set(1, 16, 0b1111111111111111) is 0b00000000000000011111111111111110;
    set(2, 16, 0b1111111111111111) is 0b00000000000000111111111111111100;
    set(3, 16, 0b1111111111111111) is 0b00000000000001111111111111111000;
    set(4, 16, 0b1111111111111111) is 0b00000000000011111111111111110000;
    set(5, 16, 0b1111111111111111) is 0b00000000000111111111111111100000;
    set(6, 16, 0b1111111111111111) is 0b00000000001111111111111111000000;
    set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000;
    set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000;
}

View file

@ -0,0 +1,201 @@
use crate::codegen;
use crate::ir::context::BindgenContext;
use crate::ir::function::ClangAbi;
use proc_macro2::Ident;
/// Used to build the output tokens for dynamic bindings.
///
/// The derived `Default` starts each token list below out empty; `push`
/// (see the `impl`) appends one entry to each list per bound function.
#[derive(Default)]
pub struct DynamicItems {
    /// Tracks the tokens that will appear inside the library struct -- e.g.:
    /// ```ignore
    /// struct Lib {
    ///     __library: ::libloading::Library,
    ///     pub x: Result<unsafe extern ..., ::libloading::Error>, // <- tracks these
    ///     ...
    /// }
    /// ```
    struct_members: Vec<proc_macro2::TokenStream>,

    /// Tracks the tokens that will appear inside the library struct's implementation, e.g.:
    ///
    /// ```ignore
    /// impl Lib {
    ///     ...
    ///     pub unsafe fn foo(&self, ...) { // <- tracks these
    ///         ...
    ///     }
    /// }
    /// ```
    struct_implementation: Vec<proc_macro2::TokenStream>,

    /// Tracks the initialization of the fields inside the `::new` constructor of the library
    /// struct, e.g.:
    /// ```ignore
    /// impl Lib {
    ///
    ///     pub unsafe fn new<P>(path: P) -> Result<Self, ::libloading::Error>
    ///     where
    ///         P: AsRef<::std::ffi::OsStr>,
    ///     {
    ///         ...
    ///         let foo = __library.get(...) ...; // <- tracks these
    ///         ...
    ///     }
    ///
    ///     ...
    /// }
    /// ```
    constructor_inits: Vec<proc_macro2::TokenStream>,

    /// Tracks the information that is passed to the library struct at the end of the `::new`
    /// constructor, e.g.:
    /// ```ignore
    /// impl LibFoo {
    ///     pub unsafe fn new<P>(path: P) -> Result<Self, ::libloading::Error>
    ///     where
    ///         P: AsRef<::std::ffi::OsStr>,
    ///     {
    ///         ...
    ///         Ok(LibFoo {
    ///             __library: __library,
    ///             foo,
    ///             bar, // <- tracks these
    ///             ...
    ///         })
    ///     }
    /// }
    /// ```
    init_fields: Vec<proc_macro2::TokenStream>,
}
impl DynamicItems {
    /// Creates an empty collection of dynamic items (all four token lists
    /// start empty, via the derived `Default`).
    pub fn new() -> Self {
        Self::default()
    }

    /// Assembles the complete token stream for the dynamic-loading library
    /// struct named `lib_ident`: the struct declaration, its `new` /
    /// `from_library` constructors, and one wrapper method per pushed
    /// function.
    ///
    /// When `ctx.options().wrap_unsafe_ops` is set, generated unsafe
    /// operations are wrapped in explicit `unsafe { ... }` blocks.
    pub fn get_tokens(
        &self,
        lib_ident: Ident,
        ctx: &BindgenContext,
    ) -> proc_macro2::TokenStream {
        let struct_members = &self.struct_members;
        let constructor_inits = &self.constructor_inits;
        let init_fields = &self.init_fields;
        let struct_implementation = &self.struct_implementation;
        let from_library = if ctx.options().wrap_unsafe_ops {
            quote!(unsafe { Self::from_library(library) })
        } else {
            quote!(Self::from_library(library))
        };
        quote! {
            extern crate libloading;
            pub struct #lib_ident {
                __library: ::libloading::Library,
                #(#struct_members)*
            }
            impl #lib_ident {
                pub unsafe fn new<P>(
                    path: P
                ) -> Result<Self, ::libloading::Error>
                where P: AsRef<::std::ffi::OsStr> {
                    let library = ::libloading::Library::new(path)?;
                    #from_library
                }
                pub unsafe fn from_library<L>(
                    library: L
                ) -> Result<Self, ::libloading::Error>
                where L: Into<::libloading::Library> {
                    let __library = library.into();
                    #( #constructor_inits )*
                    Ok(#lib_ident {
                        __library,
                        #( #init_fields ),*
                    })
                }
                #( #struct_implementation )*
            }
        }
    }

    /// Records one function to be loaded dynamically, appending the matching
    /// tokens to all four lists at once so they stay in sync.
    ///
    /// * `ident` — the function's name.
    /// * `abi` — its calling convention.
    /// * `is_variadic` — variadic functions get no wrapper method, only the
    ///   raw pointer field.
    /// * `is_required` — required symbols are stored unwrapped (loading
    ///   fails in `from_library` via `?` instead).
    /// * `args` / `args_identifiers` — full parameter tokens and the bare
    ///   argument names; their lengths must match for non-variadic
    ///   functions.
    /// * `ret` / `ret_ty` — the `-> T` clause tokens and the bare return
    ///   type tokens.
    /// * `attributes` — attributes to attach to the wrapper method.
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn push(
        &mut self,
        ident: Ident,
        abi: ClangAbi,
        is_variadic: bool,
        is_required: bool,
        args: Vec<proc_macro2::TokenStream>,
        args_identifiers: Vec<proc_macro2::TokenStream>,
        ret: proc_macro2::TokenStream,
        ret_ty: proc_macro2::TokenStream,
        attributes: Vec<proc_macro2::TokenStream>,
        ctx: &BindgenContext,
    ) {
        if !is_variadic {
            assert_eq!(args.len(), args_identifiers.len());
        }

        // The raw function-pointer type for this symbol.
        let signature = quote! { unsafe extern #abi fn ( #( #args),* ) #ret };
        let member = if is_required {
            signature
        } else {
            quote! { Result<#signature, ::libloading::Error> }
        };
        self.struct_members.push(quote! {
            pub #ident: #member,
        });

        // N.B: If the signature was required, it won't be wrapped in a Result<...>
        // and we can simply call it directly.
        let fn_ = if is_required {
            quote! { self.#ident }
        } else {
            quote! { self.#ident.as_ref().expect("Expected function, got error.") }
        };
        let call_body = if ctx.options().wrap_unsafe_ops {
            quote!(unsafe { (#fn_)(#( #args_identifiers ),*) })
        } else {
            quote!((#fn_)(#( #args_identifiers ),*) )
        };

        // We can't implement variadic functions from C easily, so we allow to
        // access the function pointer so that the user can call it just fine.
        if !is_variadic {
            self.struct_implementation.push(quote! {
                #(#attributes)*
                pub unsafe fn #ident ( &self, #( #args ),* ) -> #ret_ty {
                    #call_body
                }
            });
        }

        // N.B: Unwrap the signature upon construction if it is required to be resolved.
        let ident_str = codegen::helpers::ast_ty::cstr_expr(ident.to_string());
        let library_get = if ctx.options().wrap_unsafe_ops {
            quote!(unsafe { __library.get(#ident_str) })
        } else {
            quote!(__library.get(#ident_str))
        };
        self.constructor_inits.push(if is_required {
            quote! {
                let #ident = #library_get.map(|sym| *sym)?;
            }
        } else {
            quote! {
                let #ident = #library_get.map(|sym| *sym);
            }
        });
        self.init_fields.push(quote! {
            #ident
        });
    }
}

View file

@ -0,0 +1,33 @@
use std::error;
use std::fmt;
/// Errors that can occur during code generation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Error {
    /// The type had no computable layout, so no opaque blob could be
    /// generated for it.
    NoLayoutForOpaqueBlob,
    /// An opaque template definition was instantiated, or one too difficult
    /// for us to understand (like a partial template specialization).
    InstantiationOfOpaqueType,
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let message = match self {
            Error::NoLayoutForOpaqueBlob => {
                "Tried to generate an opaque blob, but had no layout"
            }
            Error::InstantiationOfOpaqueType => {
                "Instantiation of opaque template type or partial template specialization"
            }
        };
        f.write_str(message)
    }
}

impl error::Error for Error {}

/// A `Result` of `T` or an error of `bindgen::codegen::error::Error`.
pub type Result<T> = ::std::result::Result<T, Error>;

View file

@ -0,0 +1,311 @@
//! Helpers for code generation that don't need macro expansion.
use crate::ir::context::BindgenContext;
use crate::ir::layout::Layout;
use proc_macro2::{Ident, Span, TokenStream};
use quote::TokenStreamExt;
/// Small builders for commonly-emitted Rust attributes as token streams.
pub mod attributes {
    use proc_macro2::{Ident, Span, TokenStream};
    use std::str::FromStr;

    /// `#[repr(<which>)]` with a single identifier argument.
    pub fn repr(which: &str) -> TokenStream {
        let which = Ident::new(which, Span::call_site());
        quote! {
            #[repr( #which )]
        }
    }

    /// `#[repr(<a>, <b>, ...)]` built from arbitrary token fragments.
    pub fn repr_list(which_ones: &[&str]) -> TokenStream {
        let which_ones = which_ones
            .iter()
            .copied()
            .map(|one| TokenStream::from_str(one).expect("repr to be valid"));
        quote! {
            #[repr( #( #which_ones ),* )]
        }
    }

    /// `#[derive(<a>, <b>, ...)]` built from arbitrary token fragments.
    pub fn derives(which_ones: &[&str]) -> TokenStream {
        let which_ones = which_ones
            .iter()
            .copied()
            .map(|one| TokenStream::from_str(one).expect("derive to be valid"));
        quote! {
            #[derive( #( #which_ones ),* )]
        }
    }

    /// `#[inline]`
    pub fn inline() -> TokenStream {
        quote! {
            #[inline]
        }
    }

    /// `#[must_use]`
    pub fn must_use() -> TokenStream {
        quote! {
            #[must_use]
        }
    }

    /// `#[non_exhaustive]`
    pub fn non_exhaustive() -> TokenStream {
        quote! {
            #[non_exhaustive]
        }
    }

    /// `#[doc = "<comment>"]`, or no tokens at all for an empty comment.
    pub fn doc(comment: String) -> TokenStream {
        if comment.is_empty() {
            quote!()
        } else {
            quote!(#[doc = #comment])
        }
    }

    /// `#[link_name = "\u{1}<name>"]` for an already-mangled symbol.
    pub fn link_name(name: &str) -> TokenStream {
        // LLVM mangles the name by default but it's already mangled.
        // Prefixing the name with \u{1} should tell LLVM to not mangle it.
        let name = format!("\u{1}{}", name);
        quote! {
            #[link_name = #name]
        }
    }
}
/// Generates a proper type for a field or type with a given `Layout`, that is,
/// a type with the correct size and alignment restrictions.
pub fn blob(ctx: &BindgenContext, layout: Layout) -> TokenStream {
    let opaque = layout.opaque();
    // FIXME(emilio, #412): We fall back to byte alignment, but there are
    // some things that legitimately are more than 8-byte aligned.
    //
    // Eventually we should be able to `unwrap` here, but...
    let ty_name = opaque.known_rust_type_for_array(ctx).unwrap_or_else(|| {
        warn!("Found unknown alignment on code generation!");
        "u8"
    });
    let ty_name = Ident::new(ty_name, Span::call_site());
    // Prefer the array size clang reported; otherwise assume one element per
    // byte of the layout.
    let data_len = opaque.array_size(ctx).unwrap_or(layout.size);
    if data_len == 1 {
        quote! {
            #ty_name
        }
    } else {
        quote! {
            [ #ty_name ; #data_len ]
        }
    }
}
/// Integer type of the same size as the given `Layout`, if any such type
/// exists.
pub fn integer_type(
    ctx: &BindgenContext,
    layout: Layout,
) -> Option<TokenStream> {
    Layout::known_type_for_size(ctx, layout.size).map(|name| {
        let ident = Ident::new(name, Span::call_site());
        quote! { #ident }
    })
}
/// Generates a bitfield allocation unit type for a type with the given `Layout`.
pub fn bitfield_unit(ctx: &BindgenContext, layout: Layout) -> TokenStream {
    let size = layout.size;
    let unit = quote! {
        __BindgenBitfieldUnit<[u8; #size]>
    };
    // When everything is generated under the `root` namespace module the unit
    // type has to be referenced through it.
    if ctx.options().enable_cxx_namespaces {
        quote! { root::#unit }
    } else {
        unit
    }
}
pub mod ast_ty {
    //! Helpers that build token streams for primitive C types and for literal
    //! expressions (ints, floats, strings) used by the generated bindings.
    use crate::ir::context::BindgenContext;
    use crate::ir::function::FunctionSig;
    use crate::ir::layout::Layout;
    use crate::ir::ty::FloatKind;
    use proc_macro2::{self, TokenStream};
    use std::str::FromStr;

    /// Tokens naming the `c_void` type, honoring `ctypes_prefix` first and
    /// falling back to `core`/`std` depending on the configured features.
    pub fn c_void(ctx: &BindgenContext) -> TokenStream {
        // ctypes_prefix takes precedence
        match ctx.options().ctypes_prefix {
            Some(ref prefix) => {
                let prefix = TokenStream::from_str(prefix.as_str()).unwrap();
                quote! {
                    #prefix::c_void
                }
            }
            None => {
                if ctx.options().use_core &&
                    ctx.options().rust_features.core_ffi_c_void
                {
                    quote! { ::core::ffi::c_void }
                } else {
                    quote! { ::std::os::raw::c_void }
                }
            }
        }
    }

    /// Tokens naming a raw C type (e.g. `c_int`), qualified by the configured
    /// prefix or by `::core::ffi`/`::std::os::raw`.
    pub fn raw_type(ctx: &BindgenContext, name: &str) -> TokenStream {
        let ident = ctx.rust_ident_raw(name);
        match ctx.options().ctypes_prefix {
            Some(ref prefix) => {
                let prefix = TokenStream::from_str(prefix.as_str()).unwrap();
                quote! {
                    #prefix::#ident
                }
            }
            None => {
                if ctx.options().use_core &&
                    ctx.options().rust_features().core_ffi_c
                {
                    quote! {
                        ::core::ffi::#ident
                    }
                } else {
                    quote! {
                        ::std::os::raw::#ident
                    }
                }
            }
        }
    }

    /// Picks the Rust type used to represent a C floating-point kind,
    /// optionally consulting the type's layout (needed for `long double`).
    pub fn float_kind_rust_type(
        ctx: &BindgenContext,
        fk: FloatKind,
        layout: Option<Layout>,
    ) -> TokenStream {
        // TODO: we probably should take the type layout into account more
        // often?
        //
        // Also, maybe this one shouldn't be the default?
        match (fk, ctx.options().convert_floats) {
            (FloatKind::Float, true) => quote! { f32 },
            (FloatKind::Double, true) => quote! { f64 },
            (FloatKind::Float, false) => raw_type(ctx, "c_float"),
            (FloatKind::Double, false) => raw_type(ctx, "c_double"),
            (FloatKind::LongDouble, _) => {
                // `long double` has no direct Rust equivalent; approximate by
                // size, falling back to an opaque same-size integer.
                match layout {
                    Some(layout) => {
                        match layout.size {
                            4 => quote! { f32 },
                            8 => quote! { f64 },
                            // TODO(emilio): If rust ever gains f128 we should
                            // use it here and below.
                            _ => super::integer_type(ctx, layout)
                                .unwrap_or(quote! { f64 }),
                        }
                    }
                    None => {
                        debug_assert!(
                            false,
                            "How didn't we know the layout for a primitive type?"
                        );
                        quote! { f64 }
                    }
                }
            }
            (FloatKind::Float128, _) => {
                if ctx.options().rust_features.i128_and_u128 {
                    quote! { u128 }
                } else {
                    quote! { [u64; 2] }
                }
            }
        }
    }

    /// An unsuffixed signed integer literal expression.
    pub fn int_expr(val: i64) -> TokenStream {
        // Don't use quote! { #val } because that adds the type suffix.
        let val = proc_macro2::Literal::i64_unsuffixed(val);
        quote!(#val)
    }

    /// An unsuffixed unsigned integer literal expression.
    pub fn uint_expr(val: u64) -> TokenStream {
        // Don't use quote! { #val } because that adds the type suffix.
        let val = proc_macro2::Literal::u64_unsuffixed(val);
        quote!(#val)
    }

    /// A byte-array literal expression with a trailing NUL appended, mirroring
    /// a C string constant.
    pub fn byte_array_expr(bytes: &[u8]) -> TokenStream {
        let mut bytes: Vec<_> = bytes.to_vec();
        bytes.push(0);
        quote! { [ #(#bytes),* ] }
    }

    /// A NUL-terminated byte-string literal expression for the given string.
    pub fn cstr_expr(mut string: String) -> TokenStream {
        string.push('\0');
        let b = proc_macro2::Literal::byte_string(string.as_bytes());
        quote! {
            #b
        }
    }

    /// A `f64` literal expression; NaN and infinities are spelled through the
    /// `f64` associated constants. Errs on other non-finite values.
    pub fn float_expr(ctx: &BindgenContext, f: f64) -> Result<TokenStream, ()> {
        if f.is_finite() {
            let val = proc_macro2::Literal::f64_unsuffixed(f);
            return Ok(quote!(#val));
        }
        let prefix = ctx.trait_prefix();
        if f.is_nan() {
            return Ok(quote! {
                ::#prefix::f64::NAN
            });
        }
        if f.is_infinite() {
            return Ok(if f.is_sign_positive() {
                quote! {
                    ::#prefix::f64::INFINITY
                }
            } else {
                quote! {
                    ::#prefix::f64::NEG_INFINITY
                }
            });
        }
        warn!("Unknown non-finite float number: {:?}", f);
        Err(())
    }

    /// Argument identifiers for a function signature, synthesizing `argN`
    /// names for unnamed parameters.
    pub fn arguments_from_signature(
        signature: &FunctionSig,
        ctx: &BindgenContext,
    ) -> Vec<TokenStream> {
        let mut unnamed_arguments = 0;
        signature
            .argument_types()
            .iter()
            .map(|&(ref name, _ty)| match *name {
                Some(ref name) => {
                    let name = ctx.rust_ident(name);
                    quote! { #name }
                }
                None => {
                    unnamed_arguments += 1;
                    let name =
                        ctx.rust_ident(format!("arg{}", unnamed_arguments));
                    quote! { #name }
                }
            })
            .collect()
    }
}

View file

@ -0,0 +1,245 @@
use crate::ir::comp::{BitfieldUnit, CompKind, Field, FieldData, FieldMethods};
use crate::ir::context::BindgenContext;
use crate::ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName};
use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
/// Builds the `fmt` method of a manual `impl Debug` for the compound type
/// `item`, returning the tokens of the whole method.
pub fn gen_debug_impl(
    ctx: &BindgenContext,
    fields: &[Field],
    item: &Item,
    kind: CompKind,
) -> proc_macro2::TokenStream {
    let struct_name = item.canonical_name(ctx);
    // `{{{{` renders as a literal `{{` after passing through both `format!`
    // here and `write!` in the generated code.
    let mut format_string = format!("{} {{{{ ", struct_name);
    let mut tokens = vec![];
    if item.is_opaque(ctx, &()) {
        format_string.push_str("opaque");
    } else {
        match kind {
            CompKind::Union => {
                format_string.push_str("union");
            }
            CompKind::Struct => {
                // Each field contributes a format snippet plus the expressions
                // that produce its interpolated values; fields that can't be
                // debug-printed are filtered out.
                let processed_fields = fields.iter().filter_map(|f| match f {
                    Field::DataMember(ref fd) => fd.impl_debug(ctx, ()),
                    Field::Bitfields(ref bu) => bu.impl_debug(ctx, ()),
                });
                for (i, (fstring, toks)) in processed_fields.enumerate() {
                    if i > 0 {
                        format_string.push_str(", ");
                    }
                    tokens.extend(toks);
                    format_string.push_str(&fstring);
                }
            }
        }
    }
    format_string.push_str(" }}");
    // The accumulated format string becomes the first `write!` argument.
    tokens.insert(0, quote! { #format_string });
    let prefix = ctx.trait_prefix();
    quote! {
        fn fmt(&self, f: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix ::fmt::Result {
            write!(f, #( #tokens ),*)
        }
    }
}
/// A trait for the things which we can codegen tokens that contribute towards a
/// generated `impl Debug`.
pub trait ImplDebug<'a> {
    /// Any extra parameter required by this a particular `ImplDebug` implementation.
    type Extra;

    /// Generate a format string snippet to be included in the larger `impl Debug`
    /// format string, and the code to get the format string's interpolation values.
    ///
    /// Returns `None` when the value cannot (or should not) be debug-printed.
    fn impl_debug(
        &self,
        ctx: &BindgenContext,
        extra: Self::Extra,
    ) -> Option<(String, Vec<proc_macro2::TokenStream>)>;
}
impl<'a> ImplDebug<'a> for FieldData {
    type Extra = ();

    // Named data members delegate to their type's item; anonymous members
    // contribute nothing to the debug output.
    fn impl_debug(
        &self,
        ctx: &BindgenContext,
        _: Self::Extra,
    ) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
        self.name()
            .and_then(|name| ctx.resolve_item(self.ty()).impl_debug(ctx, name))
    }
}
impl<'a> ImplDebug<'a> for BitfieldUnit {
    type Extra = ();

    fn impl_debug(
        &self,
        ctx: &BindgenContext,
        _: Self::Extra,
    ) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
        // Accumulate a "<name> : {:?}" snippet plus a getter call for every
        // named bitfield. Unnamed bitfields emit nothing, but still count for
        // the comma separation, exactly as before.
        let mut fmt_string = String::new();
        let mut exprs = vec![];
        for (index, bitfield) in self.bitfields().iter().enumerate() {
            if index != 0 {
                fmt_string.push_str(", ");
            }
            if let Some(bitfield_name) = bitfield.name() {
                fmt_string.push_str(&format!("{} : {{:?}}", bitfield_name));
                let getter = ctx.rust_ident_raw(bitfield.getter_name());
                exprs.push(quote! {
                    self.#getter ()
                });
            }
        }
        Some((fmt_string, exprs))
    }
}
impl<'a> ImplDebug<'a> for Item {
    type Extra = &'a str;

    /// Produces the debug snippet for a field named `name` whose type is this
    /// item, dispatching on the item's type kind.
    fn impl_debug(
        &self,
        ctx: &BindgenContext,
        name: &str,
    ) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
        let name_ident = ctx.rust_ident(name);
        // We don't know if blocklisted items `impl Debug` or not, so we can't
        // add them to the format string we're building up.
        if !ctx.allowlisted_items().contains(&self.id()) {
            return None;
        }
        let ty = match self.as_type() {
            Some(ty) => ty,
            None => {
                return None;
            }
        };
        // The common case: print the field with `{:?}` via a plain
        // `self.<field>` access.
        fn debug_print(
            name: &str,
            name_ident: proc_macro2::TokenStream,
        ) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
            Some((
                format!("{}: {{:?}}", name),
                vec![quote! {
                    self.#name_ident
                }],
            ))
        }
        match *ty.kind() {
            // Handle the simple cases.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Complex(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::Comp(..) |
            TypeKind::ObjCSel => debug_print(name, quote! { #name_ident }),
            TypeKind::TemplateInstantiation(ref inst) => {
                if inst.is_opaque(ctx, self) {
                    Some((format!("{}: opaque", name), vec![]))
                } else {
                    debug_print(name, quote! { #name_ident })
                }
            }
            // The generic is not required to implement Debug, so we can not debug print that type
            TypeKind::TypeParam => {
                Some((format!("{}: Non-debuggable generic", name), vec![]))
            }
            TypeKind::Array(_, len) => {
                // Generics are not required to implement Debug
                if self.has_type_param_in_array(ctx) {
                    Some((
                        format!("{}: Array with length {}", name, len),
                        vec![],
                    ))
                } else if len < RUST_DERIVE_IN_ARRAY_LIMIT ||
                    ctx.options().rust_features().larger_arrays
                {
                    // The simple case
                    debug_print(name, quote! { #name_ident })
                } else if ctx.options().use_core {
                    // There is no String in core; reducing field visibility to avoid breaking
                    // no_std setups.
                    Some((format!("{}: [...]", name), vec![]))
                } else {
                    // Let's implement our own print function
                    Some((
                        format!("{}: [{{}}]", name),
                        vec![quote! {
                            self.#name_ident
                                .iter()
                                .enumerate()
                                .map(|(i, v)| format!("{}{:?}", if i > 0 { ", " } else { "" }, v))
                                .collect::<String>()
                        }],
                    ))
                }
            }
            TypeKind::Vector(_, len) => {
                if ctx.options().use_core {
                    // There is no format! in core; reducing field visibility to avoid breaking
                    // no_std setups.
                    Some((format!("{}(...)", name), vec![]))
                } else {
                    // Print each lane individually: vectors have no Debug impl.
                    let self_ids = 0..len;
                    Some((
                        format!("{}({{}})", name),
                        vec![quote! {
                            #(format!("{:?}", self.#self_ids)),*
                        }],
                    ))
                }
            }
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) => {
                // We follow the aliases
                ctx.resolve_item(t).impl_debug(ctx, name)
            }
            TypeKind::Pointer(inner) => {
                let inner_type = ctx.resolve_type(inner).canonical_type(ctx);
                match *inner_type.kind() {
                    // Function pointers that can't derive Debug get a
                    // placeholder instead of a `{:?}` interpolation.
                    TypeKind::Function(ref sig)
                        if !sig.function_pointers_can_derive() =>
                    {
                        Some((format!("{}: FunctionPointer", name), vec![]))
                    }
                    _ => debug_print(name, quote! { #name_ident }),
                }
            }
            TypeKind::Opaque => None,
        }
    }
}

View file

@ -0,0 +1,142 @@
use crate::ir::comp::{CompInfo, CompKind, Field, FieldMethods};
use crate::ir::context::BindgenContext;
use crate::ir::item::{IsOpaque, Item};
use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
/// Generate a manual implementation of `PartialEq` trait for the
/// specified compound type.
///
/// Returns the tokens of the generated `eq` method; all per-field comparisons
/// are chained with `&&`.
pub fn gen_partialeq_impl(
    ctx: &BindgenContext,
    comp_info: &CompInfo,
    item: &Item,
    ty_for_impl: &proc_macro2::TokenStream,
) -> Option<proc_macro2::TokenStream> {
    let mut tokens = vec![];
    if item.is_opaque(ctx, &()) {
        // Opaque types are represented by a byte blob; compare it wholesale.
        tokens.push(quote! {
            &self._bindgen_opaque_blob[..] == &other._bindgen_opaque_blob[..]
        });
    } else if comp_info.kind() == CompKind::Union {
        // Non-rust unions are represented by their raw storage field.
        assert!(!ctx.options().rust_features().untagged_union);
        tokens.push(quote! {
            &self.bindgen_union_field[..] == &other.bindgen_union_field[..]
        });
    } else {
        // Compare stored base-class subobjects first, then each field.
        for base in comp_info.base_members().iter() {
            if !base.requires_storage(ctx) {
                continue;
            }
            let ty_item = ctx.resolve_item(base.ty);
            let field_name = &base.field_name;
            if ty_item.is_opaque(ctx, &()) {
                let field_name = ctx.rust_ident(field_name);
                tokens.push(quote! {
                    &self. #field_name [..] == &other. #field_name [..]
                });
            } else {
                tokens.push(gen_field(ctx, ty_item, field_name));
            }
        }
        for field in comp_info.fields() {
            match *field {
                Field::DataMember(ref fd) => {
                    let ty_item = ctx.resolve_item(fd.ty());
                    let name = fd.name().unwrap();
                    tokens.push(gen_field(ctx, ty_item, name));
                }
                Field::Bitfields(ref bu) => {
                    // Bitfields are compared through their getters; unnamed
                    // bitfields (padding) are skipped.
                    for bitfield in bu.bitfields() {
                        if bitfield.name().is_some() {
                            let getter_name = bitfield.getter_name();
                            let name_ident = ctx.rust_ident_raw(getter_name);
                            tokens.push(quote! {
                                self.#name_ident () == other.#name_ident ()
                            });
                        }
                    }
                }
            }
        }
    }
    Some(quote! {
        fn eq(&self, other: & #ty_for_impl) -> bool {
            #( #tokens )&&*
        }
    })
}
/// Generates the equality expression comparing `self.<name>` with
/// `other.<name>` for a single field, dispatching on the field's type kind.
fn gen_field(
    ctx: &BindgenContext,
    ty_item: &Item,
    name: &str,
) -> proc_macro2::TokenStream {
    // Plain `==` comparison, valid for anything that implements PartialEq.
    fn quote_equals(
        name_ident: proc_macro2::Ident,
    ) -> proc_macro2::TokenStream {
        quote! { self.#name_ident == other.#name_ident }
    }
    let name_ident = ctx.rust_ident(name);
    let ty = ty_item.expect_type();
    match *ty.kind() {
        TypeKind::Void |
        TypeKind::NullPtr |
        TypeKind::Int(..) |
        TypeKind::Complex(..) |
        TypeKind::Float(..) |
        TypeKind::Enum(..) |
        TypeKind::TypeParam |
        TypeKind::UnresolvedTypeRef(..) |
        TypeKind::Reference(..) |
        TypeKind::ObjCInterface(..) |
        TypeKind::ObjCId |
        TypeKind::ObjCSel |
        TypeKind::Comp(..) |
        TypeKind::Pointer(_) |
        TypeKind::Function(..) |
        TypeKind::Opaque => quote_equals(name_ident),
        TypeKind::TemplateInstantiation(ref inst) => {
            if inst.is_opaque(ctx, ty_item) {
                // Opaque instantiations are blobs; compare them as slices.
                quote! {
                    &self. #name_ident [..] == &other. #name_ident [..]
                }
            } else {
                quote_equals(name_ident)
            }
        }
        TypeKind::Array(_, len) => {
            // Arrays beyond the derive limit have no PartialEq impl on old
            // rustc; compare them as slices instead.
            if len <= RUST_DERIVE_IN_ARRAY_LIMIT ||
                ctx.options().rust_features().larger_arrays
            {
                quote_equals(name_ident)
            } else {
                quote! {
                    &self. #name_ident [..] == &other. #name_ident [..]
                }
            }
        }
        TypeKind::Vector(_, len) => {
            // SIMD vectors are compared lane by lane.
            let self_ids = 0..len;
            let other_ids = 0..len;
            quote! {
                #(self.#self_ids == other.#other_ids &&)* true
            }
        }
        TypeKind::ResolvedTypeRef(t) |
        TypeKind::TemplateAlias(t, _) |
        TypeKind::Alias(t) |
        TypeKind::BlockPointer(t) => {
            // Follow the alias chain to the underlying type.
            let inner_item = ctx.resolve_item(t);
            gen_field(ctx, inner_item, name)
        }
    }
}

5047
third-party/vendor/bindgen/codegen/mod.rs vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,66 @@
use syn::{
visit_mut::{visit_item_mod_mut, VisitMut},
Item, ItemForeignMod, ItemMod,
};
/// Merges `extern` blocks that share the same ABI and attributes inside
/// `item_mod` (recursively) into a single block each.
pub(super) fn merge_extern_blocks(item_mod: &mut ItemMod) {
    Visitor.visit_item_mod_mut(item_mod)
}
struct Visitor;

impl VisitMut for Visitor {
    fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) {
        if let Some((_, ref mut items)) = item_mod.content {
            // Keep all the extern blocks in a different `Vec` for faster search.
            let mut extern_blocks = Vec::<ItemForeignMod>::new();
            for item in std::mem::take(items) {
                match item {
                    Item::ForeignMod(ItemForeignMod {
                        attrs,
                        abi,
                        brace_token,
                        items: extern_block_items,
                    }) => {
                        // Look for an already-seen extern block with the same
                        // ABI and attributes to merge into; otherwise keep
                        // this one as a new block.
                        let existing = extern_blocks.iter().position(|block| {
                            block.attrs == attrs && block.abi == abi
                        });
                        match existing {
                            Some(idx) => extern_blocks[idx]
                                .items
                                .extend_from_slice(&extern_block_items),
                            None => extern_blocks.push(ItemForeignMod {
                                attrs,
                                abi,
                                brace_token,
                                items: extern_block_items,
                            }),
                        }
                    }
                    // Anything that isn't an extern block is kept untouched.
                    other => items.push(other),
                }
            }
            // Re-append the (now merged) extern blocks after the other items.
            for extern_block in extern_blocks {
                items.push(Item::ForeignMod(extern_block));
            }
        }
        visit_item_mod_mut(self, item_mod)
    }
}

View file

@ -0,0 +1,66 @@
use proc_macro2::TokenStream;
use quote::ToTokens;
use syn::{parse2, ItemMod};
use crate::BindgenOptions;
mod merge_extern_blocks;
mod sort_semantically;
use merge_extern_blocks::merge_extern_blocks;
use sort_semantically::sort_semantically;
/// A single post-processing pass: a predicate deciding whether the pass is
/// enabled for the given options, plus the function that applies it in place.
struct PostProcessingPass {
    // Whether the pass should run, judged from the bindgen options.
    should_run: fn(&BindgenOptions) -> bool,
    // The pass itself, mutating the parsed module in place.
    run: fn(&mut ItemMod),
}
// TODO: This can be a const fn when mutable references are allowed in const
// context.
/// Builds a `PostProcessingPass` from an identifier that names both the
/// corresponding `BindgenOptions` flag and the pass function itself.
macro_rules! pass {
    ($pass:ident) => {
        PostProcessingPass {
            should_run: |options| options.$pass,
            run: |item_mod| $pass(item_mod),
        }
    };
}
/// All known post-processing passes, in the order they are applied.
const PASSES: &[PostProcessingPass] =
    &[pass!(merge_extern_blocks), pass!(sort_semantically)];
/// Runs the enabled post-processing passes over the generated items, going
/// through a `syn` round-trip only when at least one pass is enabled.
pub(crate) fn postprocessing(
    items: Vec<TokenStream>,
    options: &BindgenOptions,
) -> TokenStream {
    // Fast path: nothing to do, just splice the token streams together.
    if !PASSES.iter().any(|pass| (pass.should_run)(options)) {
        return items.into_iter().collect();
    }
    // This syn business is a hack, for now. This means that we are re-parsing already
    // generated code using `syn` (as opposed to `quote`) because `syn` provides us more
    // control over the elements.
    // One caveat is that some of the items coming from `quote`d output might have
    // multiple items within them. Hence, we have to wrap the incoming in a `mod`.
    // The `unwrap` here is deliberate because bindgen should generate valid rust items at all
    // times.
    let wrapped = quote!(mod wrapper_for_postprocessing_hack { #( #items )* });
    let mut item_mod = parse2::<ItemMod>(wrapped).unwrap();
    for pass in PASSES.iter().filter(|pass| (pass.should_run)(options)) {
        (pass.run)(&mut item_mod);
    }
    // Unwrap the helper module and flatten its items back into one stream.
    let synful_items = item_mod
        .content
        .map(|(_, items)| items)
        .unwrap_or_default()
        .into_iter()
        .map(|item| item.into_token_stream());
    quote! { #( #synful_items )* }
}

View file

@ -0,0 +1,38 @@
use syn::{
visit_mut::{visit_item_mod_mut, VisitMut},
Item, ItemMod,
};
/// Stable-sorts the items of `item_mod` (recursively) into a canonical order
/// by item kind.
pub(super) fn sort_semantically(item_mod: &mut ItemMod) {
    Visitor.visit_item_mod_mut(item_mod)
}
struct Visitor;
impl VisitMut for Visitor {
fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) {
if let Some((_, ref mut items)) = item_mod.content {
items.sort_by_key(|item| match item {
Item::Type(_) => 0,
Item::Struct(_) => 1,
Item::Const(_) => 2,
Item::Fn(_) => 3,
Item::Enum(_) => 4,
Item::Union(_) => 5,
Item::Static(_) => 6,
Item::Trait(_) => 7,
Item::TraitAlias(_) => 8,
Item::Impl(_) => 9,
Item::Mod(_) => 10,
Item::Use(_) => 11,
Item::Verbatim(_) => 12,
Item::ExternCrate(_) => 13,
Item::ForeignMod(_) => 14,
Item::Macro(_) => 15,
Item::Macro2(_) => 16,
_ => 18,
});
}
visit_item_mod_mut(self, item_mod)
}
}

View file

@ -0,0 +1,444 @@
//! Helpers for code generation that need struct layout
use super::helpers;
use crate::ir::comp::CompInfo;
use crate::ir::context::BindgenContext;
use crate::ir::layout::Layout;
use crate::ir::ty::{Type, TypeKind};
use proc_macro2::{self, Ident, Span};
use std::cmp;
const MAX_GUARANTEED_ALIGN: usize = 8;
/// Trace the layout of struct.
#[derive(Debug)]
pub struct StructLayoutTracker<'a> {
    /// Name of the type whose layout is being traced.
    name: &'a str,
    ctx: &'a BindgenContext,
    comp: &'a CompInfo,
    /// Whether the type is packed (suppresses inter-field padding).
    is_packed: bool,
    /// The layout clang computed for the whole type, if known.
    known_type_layout: Option<Layout>,
    /// Whether the type is generated as a native Rust union.
    is_rust_union: bool,
    can_copy_union_fields: bool,
    /// Running byte offset where the next field would start.
    latest_offset: usize,
    /// Number of `__bindgen_padding_N` fields generated so far.
    padding_count: usize,
    /// Layout of the most recently processed field, if any.
    latest_field_layout: Option<Layout>,
    /// Largest field alignment seen so far.
    max_field_align: usize,
    /// Whether the previous field was a bitfield allocation unit.
    last_field_was_bitfield: bool,
}
/// Returns a size aligned to a given value.
///
/// An alignment of zero cannot round anything and is treated as a no-op.
pub fn align_to(size: usize, align: usize) -> usize {
    if align == 0 {
        return size;
    }
    match size % align {
        0 => size,
        rem => size + (align - rem),
    }
}
/// Returns the lower power of two byte count that can hold at most n bits.
pub fn bytes_from_bits_pow2(n: usize) -> usize {
    match n {
        // No bits need no storage.
        0 => 0,
        // Anything up to a byte fits in a byte.
        1..=8 => 1,
        // Round the bit count up to a power of two (a no-op when it already
        // is one) and convert to whole bytes.
        _ => n.next_power_of_two() / 8,
    }
}
#[test]
fn test_align_to() {
    // Alignment of 1 (or an already aligned size) is the identity.
    assert_eq!(align_to(1, 1), 1);
    assert_eq!(align_to(1, 2), 2);
    assert_eq!(align_to(1, 4), 4);
    assert_eq!(align_to(5, 1), 5);
    // Otherwise the size is rounded up to the next multiple of the alignment.
    assert_eq!(align_to(17, 4), 20);
}
#[test]
fn test_bytes_from_bits_pow2() {
    assert_eq!(bytes_from_bits_pow2(0), 0);
    // 1..=8 bits fit in one byte.
    for i in 1..9 {
        assert_eq!(bytes_from_bits_pow2(i), 1);
    }
    // 9..=16 bits need two bytes.
    for i in 9..17 {
        assert_eq!(bytes_from_bits_pow2(i), 2);
    }
    // 17..=32 bits round up to four bytes.
    for i in 17..33 {
        assert_eq!(bytes_from_bits_pow2(i), 4);
    }
}
impl<'a> StructLayoutTracker<'a> {
    /// Creates a tracker for `comp`/`ty`, precomputing packedness and whether
    /// the type can be emitted as a native Rust union.
    pub fn new(
        ctx: &'a BindgenContext,
        comp: &'a CompInfo,
        ty: &'a Type,
        name: &'a str,
    ) -> Self {
        let known_type_layout = ty.layout(ctx);
        let is_packed = comp.is_packed(ctx, known_type_layout.as_ref());
        let (is_rust_union, can_copy_union_fields) =
            comp.is_rust_union(ctx, known_type_layout.as_ref(), name);
        StructLayoutTracker {
            name,
            ctx,
            comp,
            is_packed,
            known_type_layout,
            is_rust_union,
            can_copy_union_fields,
            latest_offset: 0,
            padding_count: 0,
            latest_field_layout: None,
            max_field_align: 0,
            last_field_was_bitfield: false,
        }
    }

    /// Whether union fields can be generated by value (all `Copy`).
    pub fn can_copy_union_fields(&self) -> bool {
        self.can_copy_union_fields
    }

    /// Whether the type is generated as a native Rust union.
    pub fn is_rust_union(&self) -> bool {
        self.is_rust_union
    }

    /// Records the implicit vtable pointer at the start of the type.
    pub fn saw_vtable(&mut self) {
        debug!("saw vtable for {}", self.name);
        let ptr_size = self.ctx.target_pointer_size();
        self.latest_offset += ptr_size;
        self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size));
        self.max_field_align = ptr_size;
    }

    /// Records a stored base-class subobject preceding the fields.
    pub fn saw_base(&mut self, base_ty: &Type) {
        debug!("saw base for {}", self.name);
        if let Some(layout) = base_ty.layout(self.ctx) {
            self.align_to_latest_field(layout);
            self.latest_offset += self.padding_bytes(layout) + layout.size;
            self.latest_field_layout = Some(layout);
            self.max_field_align = cmp::max(self.max_field_align, layout.align);
        }
    }

    /// Records a bitfield allocation unit with the given layout.
    pub fn saw_bitfield_unit(&mut self, layout: Layout) {
        debug!("saw bitfield unit for {}: {:?}", self.name, layout);
        self.align_to_latest_field(layout);
        self.latest_offset += layout.size;
        debug!(
            "Offset: <bitfield>: {} -> {}",
            self.latest_offset - layout.size,
            self.latest_offset
        );
        self.latest_field_layout = Some(layout);
        self.last_field_was_bitfield = true;
        // NB: We intentionally don't update the max_field_align here, since our
        // bitfields code doesn't necessarily guarantee it, so we need to
        // actually generate the dummy alignment.
    }

    /// Returns a padding field if necessary for a given new field _before_
    /// adding that field.
    pub fn saw_field(
        &mut self,
        field_name: &str,
        field_ty: &Type,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        let mut field_layout = field_ty.layout(self.ctx)?;
        if let TypeKind::Array(inner, len) =
            *field_ty.canonical_type(self.ctx).kind()
        {
            // FIXME(emilio): As an _ultra_ hack, we correct the layout returned
            // by arrays of structs that have a bigger alignment than what we
            // can support.
            //
            // This means that the structs in the array are super-unsafe to
            // access, since they won't be properly aligned, but there's not too
            // much we can do about it.
            if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx)
            {
                if layout.align > MAX_GUARANTEED_ALIGN {
                    field_layout.size =
                        align_to(layout.size, layout.align) * len;
                    field_layout.align = MAX_GUARANTEED_ALIGN;
                }
            }
        }
        self.saw_field_with_layout(field_name, field_layout, field_offset)
    }

    /// Same as `saw_field`, for a field whose layout is already known.
    /// `field_offset`, when present, is the field's bit offset from clang.
    pub fn saw_field_with_layout(
        &mut self,
        field_name: &str,
        field_layout: Layout,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        let will_merge_with_bitfield = self.align_to_latest_field(field_layout);
        let is_union = self.comp.is_union();
        // How many bytes of padding must precede this field. Prefer the
        // explicit clang offset when it's ahead of our running offset.
        let padding_bytes = match field_offset {
            Some(offset) if offset / 8 > self.latest_offset => {
                offset / 8 - self.latest_offset
            }
            _ => {
                if will_merge_with_bitfield ||
                    field_layout.align == 0 ||
                    is_union
                {
                    0
                } else if !self.is_packed {
                    self.padding_bytes(field_layout)
                } else if let Some(l) = self.known_type_layout {
                    self.padding_bytes(l)
                } else {
                    0
                }
            }
        };
        self.latest_offset += padding_bytes;
        let padding_layout = if self.is_packed || is_union {
            None
        } else {
            let force_padding = self.ctx.options().force_explicit_padding;
            // Otherwise the padding is useless.
            let need_padding = force_padding ||
                padding_bytes >= field_layout.align ||
                field_layout.align > MAX_GUARANTEED_ALIGN;
            debug!(
                "Offset: <padding>: {} -> {}",
                self.latest_offset - padding_bytes,
                self.latest_offset
            );
            debug!(
                "align field {} to {}/{} with {} padding bytes {:?}",
                field_name,
                self.latest_offset,
                field_offset.unwrap_or(0) / 8,
                padding_bytes,
                field_layout
            );
            let padding_align = if force_padding {
                1
            } else {
                cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN)
            };
            if need_padding && padding_bytes != 0 {
                Some(Layout::new(padding_bytes, padding_align))
            } else {
                None
            }
        };
        self.latest_offset += field_layout.size;
        self.latest_field_layout = Some(field_layout);
        self.max_field_align =
            cmp::max(self.max_field_align, field_layout.align);
        self.last_field_was_bitfield = false;
        debug!(
            "Offset: {}: {} -> {}",
            field_name,
            self.latest_offset - field_layout.size,
            self.latest_offset
        );
        padding_layout.map(|layout| self.padding_field(layout))
    }

    /// Returns a trailing padding field up to `comp_layout.size`, when
    /// explicit padding is requested and the struct has tail padding.
    pub fn add_tail_padding(
        &mut self,
        comp_name: &str,
        comp_layout: Layout,
    ) -> Option<proc_macro2::TokenStream> {
        // Only emit an padding field at the end of a struct if the
        // user configures explicit padding.
        if !self.ctx.options().force_explicit_padding {
            return None;
        }
        // Padding doesn't make sense for rust unions.
        if self.is_rust_union {
            return None;
        }
        if self.latest_offset == comp_layout.size {
            // This struct does not contain tail padding.
            return None;
        }
        trace!(
            "need a tail padding field for {}: offset {} -> size {}",
            comp_name,
            self.latest_offset,
            comp_layout.size
        );
        let size = comp_layout.size - self.latest_offset;
        Some(self.padding_field(Layout::new(size, 0)))
    }

    /// Returns a final padding field, if needed, so the generated struct
    /// reaches the size clang reported for `layout`.
    pub fn pad_struct(
        &mut self,
        layout: Layout,
    ) -> Option<proc_macro2::TokenStream> {
        debug!(
            "pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}",
            self, layout
        );
        if layout.size < self.latest_offset {
            warn!(
                "Calculated wrong layout for {}, too more {} bytes",
                self.name,
                self.latest_offset - layout.size
            );
            return None;
        }
        let padding_bytes = layout.size - self.latest_offset;
        if padding_bytes == 0 {
            return None;
        }
        let repr_align = self.ctx.options().rust_features().repr_align;
        // We always pad to get to the correct size if the struct is one of
        // those we can't align properly.
        //
        // Note that if the last field we saw was a bitfield, we may need to pad
        // regardless, because bitfields don't respect alignment as strictly as
        // other fields.
        if padding_bytes >= layout.align ||
            (self.last_field_was_bitfield &&
                padding_bytes >= self.latest_field_layout.unwrap().align) ||
            (!repr_align && layout.align > MAX_GUARANTEED_ALIGN)
        {
            let layout = if self.is_packed {
                Layout::new(padding_bytes, 1)
            } else if self.last_field_was_bitfield ||
                layout.align > MAX_GUARANTEED_ALIGN
            {
                // We've already given up on alignment here.
                Layout::for_size(self.ctx, padding_bytes)
            } else {
                Layout::new(padding_bytes, layout.align)
            };
            debug!("pad bytes to struct {}, {:?}", self.name, layout);
            Some(self.padding_field(layout))
        } else {
            None
        }
    }

    /// Whether the generated struct needs an explicit `#[repr(align(N))]`
    /// (or an alignment dummy) to reach `layout.align`.
    pub fn requires_explicit_align(&self, layout: Layout) -> bool {
        let repr_align = self.ctx.options().rust_features().repr_align;
        // Always force explicit repr(align) for stuff more than 16-byte aligned
        // to work-around https://github.com/rust-lang/rust/issues/54341.
        //
        // Worst-case this just generates redundant alignment attributes.
        if repr_align && self.max_field_align >= 16 {
            return true;
        }
        if self.max_field_align >= layout.align {
            return false;
        }
        // We can only generate up-to a 8-bytes of alignment unless we support
        // repr(align).
        repr_align || layout.align <= MAX_GUARANTEED_ALIGN
    }

    /// Bytes needed to round `latest_offset` up to `layout.align`.
    fn padding_bytes(&self, layout: Layout) -> usize {
        align_to(self.latest_offset, layout.align) - self.latest_offset
    }

    /// Emits a `__bindgen_padding_N` field of the given layout and bumps the
    /// padding counter and the max field alignment.
    fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream {
        let ty = helpers::blob(self.ctx, layout);
        let padding_count = self.padding_count;
        self.padding_count += 1;
        let padding_field_name = Ident::new(
            &format!("__bindgen_padding_{}", padding_count),
            Span::call_site(),
        );
        self.max_field_align = cmp::max(self.max_field_align, layout.align);
        quote! {
            pub #padding_field_name : #ty ,
        }
    }

    /// Returns whether the new field is known to merge with a bitfield.
    ///
    /// This is just to avoid doing the same check also in pad_field.
    fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool {
        if self.is_packed {
            // Skip to align fields when packed.
            return false;
        }
        let layout = match self.latest_field_layout {
            Some(l) => l,
            None => return false,
        };
        // If it was, we may or may not need to align, depending on what the
        // current field alignment and the bitfield size and alignment are.
        debug!(
            "align_to_bitfield? {}: {:?} {:?}",
            self.last_field_was_bitfield, layout, new_field_layout
        );
        // Avoid divide-by-zero errors if align is 0.
        let align = cmp::max(1, layout.align);
        if self.last_field_was_bitfield &&
            new_field_layout.align <= layout.size % align &&
            new_field_layout.size <= layout.size % align
        {
            // The new field will be coalesced into some of the remaining bits.
            //
            // FIXME(emilio): I think this may not catch everything?
            debug!("Will merge with bitfield");
            return true;
        }
        // Else, just align the obvious way.
        self.latest_offset += self.padding_bytes(layout);
        false
    }
}

20
third-party/vendor/bindgen/deps.rs vendored Normal file
View file

@ -0,0 +1,20 @@
/// Generating build depfiles from parsed bindings.
use std::{collections::BTreeSet, path::PathBuf};
/// Specification for a Make-style depfile produced alongside the bindings.
#[derive(Clone, Debug)]
pub(crate) struct DepfileSpec {
    /// The target name emitted on the left-hand side of the depfile rule.
    pub output_module: String,
    /// Path the depfile is written to.
    pub depfile_path: PathBuf,
}
impl DepfileSpec {
    /// Writes a rule of the form `<output_module>: <dep> <dep> ...` to
    /// `depfile_path`.
    pub fn write(&self, deps: &BTreeSet<String>) -> std::io::Result<()> {
        // Append each dependency in place. The previous implementation
        // rebuilt the whole buffer with `format!` per dep, which is O(n^2)
        // in the number of dependencies; the output is byte-identical.
        let mut buf = format!("{}:", self.output_module);
        for file in deps {
            buf.push(' ');
            buf.push_str(file);
        }
        std::fs::write(&self.depfile_path, &buf)
    }
}

View file

@ -0,0 +1,34 @@
//! Macros for defining extra assertions that should only be checked in testing
//! and/or CI when the `testing_only_extra_assertions` feature is enabled.
/// Simple macro that forwards to assert! when using
/// testing_only_extra_assertions.
#[macro_export]
macro_rules! extra_assert {
    ( $cond:expr ) => {
        // `cfg!` keeps the assertion type-checked in every build while only
        // evaluating it when the feature is enabled.
        if cfg!(feature = "testing_only_extra_assertions") {
            assert!($cond);
        }
    };
    ( $cond:expr , $( $arg:tt )+ ) => {
        if cfg!(feature = "testing_only_extra_assertions") {
            assert!($cond, $( $arg )* )
        }
    };
}
/// Simple macro that forwards to assert_eq! when using
/// testing_only_extra_assertions.
#[macro_export]
macro_rules! extra_assert_eq {
    ( $lhs:expr , $rhs:expr ) => {
        if cfg!(feature = "testing_only_extra_assertions") {
            assert_eq!($lhs, $rhs);
        }
    };
    ( $lhs:expr , $rhs:expr , $( $arg:tt )+ ) => {
        if cfg!(feature = "testing_only_extra_assertions") {
            // This arm previously expanded to `assert!($lhs, $rhs, ...)`,
            // which treated `$lhs` as the boolean condition and `$rhs` as the
            // format string — it never compared the two values, and failed to
            // compile for any non-bool `$lhs`.
            assert_eq!($lhs, $rhs, $( $arg )* );
        }
    };
}

317
third-party/vendor/bindgen/features.rs vendored Normal file
View file

@ -0,0 +1,317 @@
//! Contains code for selecting features
#![deny(missing_docs)]
#![deny(unused_extern_crates)]
#![allow(deprecated)]
use std::io;
use std::str::FromStr;
/// Define RustTarget struct definition, Default impl, and conversions
/// between RustTarget and String.
macro_rules! rust_target_def {
    ( $( $( #[$attr:meta] )* => $release:ident => $value:expr; )* ) => {
        /// Represents the version of the Rust language to target.
        ///
        /// To support a beta release, use the corresponding stable release.
        ///
        /// This enum will have more variants added as necessary.
        #[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Hash)]
        #[allow(non_camel_case_types)]
        pub enum RustTarget {
            $(
                $(
                    #[$attr]
                )*
                $release,
            )*
        }

        impl Default for RustTarget {
            /// Gives the latest stable Rust version
            fn default() -> RustTarget {
                LATEST_STABLE_RUST
            }
        }

        impl FromStr for RustTarget {
            type Err = io::Error;

            /// Create a `RustTarget` from a string.
            ///
            /// * The stable/beta versions of Rust are of the form "1.0",
            /// "1.19", etc.
            /// * The nightly version should be specified with "nightly".
            fn from_str(s: &str) -> Result<Self, Self::Err> {
                match s.as_ref() {
                    $(
                        stringify!($value) => Ok(RustTarget::$release),
                    )*
                    _ => Err(
                        io::Error::new(
                            io::ErrorKind::InvalidInput,
                            concat!(
                                "Got an invalid rust target. Accepted values ",
                                "are of the form ",
                                "\"1.0\" or \"nightly\"."))),
                }
            }
        }

        // The inverse of `FromStr`: render a target back to its version
        // string.
        impl From<RustTarget> for String {
            fn from(target: RustTarget) -> Self {
                match target {
                    $(
                        RustTarget::$release => stringify!($value),
                    )*
                }.into()
            }
        }
    }
}
/// Defines an array slice with all RustTarget values
///
/// Expands to a `RUST_TARGET_STRINGS` static containing the stringified
/// version token of every target (e.g. "1.0", "nightly"), in declaration
/// order. The `$attr`/`$release` parts of each entry are accepted but unused.
macro_rules! rust_target_values_def {
    ( $( $( #[$attr:meta] )* => $release:ident => $value:expr; )* ) => {
        /// Strings of allowed `RustTarget` values
        // `'static` is elided: it is the default (and only) lifetime for a
        // `static` item, so spelling it out was redundant
        // (clippy::redundant_static_lifetimes).
        pub static RUST_TARGET_STRINGS: &[&str] = &[
            $(
                stringify!($value),
            )*
        ];
    }
}
/// Defines macro which takes a macro
///
/// This is the classic "x-macro" pattern: the list of supported Rust targets
/// lives here exactly once, and each consumer macro (`rust_target_def`,
/// `rust_target_values_def`) is invoked with the same list.
macro_rules! rust_target_base {
    ( $x_macro:ident ) => {
        $x_macro!(
            /// Rust stable 1.0
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_0 => 1.0;
            /// Rust stable 1.17
            /// * Static lifetime elision ([RFC 1623](https://github.com/rust-lang/rfcs/blob/master/text/1623-static.md))
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_17 => 1.17;
            /// Rust stable 1.19
            /// * Untagged unions ([RFC 1444](https://github.com/rust-lang/rfcs/blob/master/text/1444-union.md))
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_19 => 1.19;
            /// Rust stable 1.20
            /// * Associated constants ([PR](https://github.com/rust-lang/rust/pull/42809))
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_20 => 1.20;
            /// Rust stable 1.21
            /// * Builtin impls for `Clone` ([PR](https://github.com/rust-lang/rust/pull/43690))
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_21 => 1.21;
            /// Rust stable 1.25
            /// * `repr(align)` ([PR](https://github.com/rust-lang/rust/pull/47006))
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_25 => 1.25;
            /// Rust stable 1.26
            /// * [i128 / u128 support](https://doc.rust-lang.org/std/primitive.i128.html)
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_26 => 1.26;
            /// Rust stable 1.27
            /// * `must_use` attribute on functions ([PR](https://github.com/rust-lang/rust/pull/48925))
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_27 => 1.27;
            /// Rust stable 1.28
            /// * `repr(transparent)` ([PR](https://github.com/rust-lang/rust/pull/51562))
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_28 => 1.28;
            /// Rust stable 1.30
            /// * `const fn` support for limited cases ([PR](https://github.com/rust-lang/rust/pull/54835/)
            /// * [c_void available in core](https://doc.rust-lang.org/core/ffi/enum.c_void.html)
            #[deprecated = "This rust target is deprecated. If you have a good reason to use this target please report it at https://github.com/rust-lang/rust-bindgen/issues"] => Stable_1_30 => 1.30;
            /// Rust stable 1.33
            /// * repr(packed(N)) ([PR](https://github.com/rust-lang/rust/pull/57049))
            => Stable_1_33 => 1.33;
            /// Rust stable 1.36
            /// * `MaybeUninit` instead of `mem::uninitialized()` ([PR](https://github.com/rust-lang/rust/pull/60445))
            => Stable_1_36 => 1.36;
            /// Rust stable 1.40
            /// * `non_exhaustive` enums/structs ([Tracking issue](https://github.com/rust-lang/rust/issues/44109))
            => Stable_1_40 => 1.40;
            /// Rust stable 1.47
            /// * `larger_arrays` ([Tracking issue](https://github.com/rust-lang/rust/pull/74060))
            => Stable_1_47 => 1.47;
            /// Rust stable 1.64
            /// * `core_ffi_c` ([Tracking issue](https://github.com/rust-lang/rust/issues/94501))
            => Stable_1_64 => 1.64;
            /// Nightly rust
            /// * `thiscall` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/42202))
            /// * `vectorcall` calling convention (no tracking issue)
            /// * `c_unwind` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/74990))
            => Nightly => nightly;
        );
    }
}
// Instantiate both consumers of the single target list: the `RustTarget`
// enum with its impls, and the `RUST_TARGET_STRINGS` slice.
rust_target_base!(rust_target_def);
rust_target_base!(rust_target_values_def);
/// Latest stable release of Rust
pub const LATEST_STABLE_RUST: RustTarget = RustTarget::Stable_1_64;
/// Create RustFeatures struct definition, new(), and a getter for each field
macro_rules! rust_feature_def {
    (
        $( $rust_target:ident {
            $( $( #[$attr:meta] )* => $feature:ident; )*
        } )*
    ) => {
        /// Features supported by a rust target
        #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
        #[allow(missing_docs)] // Documentation should go into the relevant variants.
        pub(crate) struct RustFeatures {
            // One public bool flag per feature, flattened across all target
            // groups.
            $( $(
                $(
                    #[$attr]
                )*
                pub $feature: bool,
            )* )*
        }
        impl RustFeatures {
            /// Gives a RustFeatures struct with all features disabled
            fn new() -> Self {
                RustFeatures {
                    $( $(
                        $feature: false,
                    )* )*
                }
            }
        }
        impl From<RustTarget> for RustFeatures {
            fn from(rust_target: RustTarget) -> Self {
                // Features are cumulative: a target enables its own group and
                // every group introduced by an earlier (smaller) target,
                // relying on `RustTarget`'s derived `PartialOrd`.
                let mut features = RustFeatures::new();
                $(
                    if rust_target >= RustTarget::$rust_target {
                        $(
                            features.$feature = true;
                        )*
                    }
                )*
                features
            }
        }
    }
}
// NOTE(emilio): When adding or removing features here, make sure to update the
// documentation for the relevant variant in the rust_target_base macro
// definition.
//
// Each group names the first RustTarget in which its features became
// available; features accumulate for every later target.
rust_feature_def!(
    Stable_1_17 {
        => static_lifetime_elision;
    }
    Stable_1_19 {
        => untagged_union;
    }
    Stable_1_20 {
        => associated_const;
    }
    Stable_1_21 {
        => builtin_clone_impls;
    }
    Stable_1_25 {
        => repr_align;
    }
    Stable_1_26 {
        => i128_and_u128;
    }
    Stable_1_27 {
        => must_use_function;
    }
    Stable_1_28 {
        => repr_transparent;
    }
    Stable_1_30 {
        => min_const_fn;
        => core_ffi_c_void;
    }
    Stable_1_33 {
        => repr_packed_n;
    }
    Stable_1_36 {
        => maybe_uninit;
    }
    Stable_1_40 {
        => non_exhaustive;
    }
    Stable_1_47 {
        => larger_arrays;
    }
    Stable_1_64 {
        => core_ffi_c;
    }
    Nightly {
        => thiscall_abi;
        => vectorcall_abi;
        => c_unwind_abi;
    }
);
impl Default for RustFeatures {
fn default() -> Self {
let default_rust_target: RustTarget = Default::default();
Self::from(default_rust_target)
}
}
#[cfg(test)]
mod test {
    #![allow(unused_imports)]
    use super::*;
    // Spot-check that features switch on at the expected targets and stay
    // off before them.
    #[test]
    fn target_features() {
        let f_1_0 = RustFeatures::from(RustTarget::Stable_1_0);
        assert!(
            !f_1_0.static_lifetime_elision &&
                !f_1_0.core_ffi_c_void &&
                !f_1_0.untagged_union &&
                !f_1_0.associated_const &&
                !f_1_0.builtin_clone_impls &&
                !f_1_0.repr_align &&
                !f_1_0.thiscall_abi &&
                !f_1_0.vectorcall_abi
        );
        let f_1_21 = RustFeatures::from(RustTarget::Stable_1_21);
        assert!(
            f_1_21.static_lifetime_elision &&
                !f_1_21.core_ffi_c_void &&
                f_1_21.untagged_union &&
                f_1_21.associated_const &&
                f_1_21.builtin_clone_impls &&
                !f_1_21.repr_align &&
                !f_1_21.thiscall_abi &&
                !f_1_21.vectorcall_abi
        );
        let f_nightly = RustFeatures::from(RustTarget::Nightly);
        assert!(
            f_nightly.static_lifetime_elision &&
                f_nightly.core_ffi_c_void &&
                f_nightly.untagged_union &&
                f_nightly.associated_const &&
                f_nightly.builtin_clone_impls &&
                f_nightly.maybe_uninit &&
                f_nightly.repr_align &&
                f_nightly.thiscall_abi &&
                f_nightly.vectorcall_abi &&
                f_nightly.c_unwind_abi
        );
    }
    // Round-trips a target through `String` (Display side) and `FromStr`.
    fn test_target(target_str: &str, target: RustTarget) {
        let target_string: String = target.into();
        assert_eq!(target_str, target_string);
        assert_eq!(target, RustTarget::from_str(target_str).unwrap());
    }
    #[test]
    fn str_to_target() {
        test_target("1.0", RustTarget::Stable_1_0);
        test_target("1.17", RustTarget::Stable_1_17);
        test_target("1.19", RustTarget::Stable_1_19);
        test_target("1.21", RustTarget::Stable_1_21);
        test_target("1.25", RustTarget::Stable_1_25);
        test_target("nightly", RustTarget::Nightly);
    }
}

View file

@ -0,0 +1,732 @@
//! Determining which types for which we cannot emit `#[derive(Trait)]`.
use std::fmt;
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::analysis::has_vtable::HasVtable;
use crate::ir::comp::CompKind;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::derive::CanDerive;
use crate::ir::function::FunctionSig;
use crate::ir::item::{IsOpaque, Item};
use crate::ir::layout::Layout;
use crate::ir::template::TemplateParameters;
use crate::ir::traversal::{EdgeKind, Trace};
use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use crate::ir::ty::{Type, TypeKind};
use crate::{Entry, HashMap, HashSet};
/// Which trait to consider when doing the `CannotDerive` analysis.
///
/// One analysis run handles exactly one of these; `PartialEq`/`PartialOrd`
/// share a variant because their derivability constraints are identical here.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum DeriveTrait {
    /// The `Copy` trait.
    Copy,
    /// The `Debug` trait.
    Debug,
    /// The `Default` trait.
    Default,
    /// The `Hash` trait.
    Hash,
    /// The `PartialEq` and `PartialOrd` traits.
    PartialEqOrPartialOrd,
}
/// An analysis that finds for each IR item whether a trait cannot be derived.
///
/// We use the monotone constraint function `cannot_derive`, defined as follows
/// for type T:
///
/// * If T is Opaque and the layout of the type is known, get this layout as an
///   opaque type and check whether it can derive using trivial checks.
///
/// * If T is Array, a trait cannot be derived if the array is incomplete,
///   if the length of the array is larger than the limit (unless the trait
///   allows it), or the trait cannot be derived for the type of data the array
///   contains.
///
/// * If T is Vector, a trait cannot be derived if the trait cannot be derived
///   for the type of data the vector contains.
///
/// * If T is a type alias, a templated alias or an indirection to another type,
///   the trait cannot be derived if the trait cannot be derived for type T
///   refers to.
///
/// * If T is a compound type, the trait cannot be derived if the trait cannot
///   be derived for any of its base members or fields.
///
/// * If T is an instantiation of an abstract template definition, the trait
///   cannot be derived if any of the template arguments or template definition
///   cannot derive the trait.
///
/// * For all other (simple) types, compiler and standard library limitations
///   dictate whether the trait is implemented.
#[derive(Debug, Clone)]
pub struct CannotDerive<'ctx> {
    ctx: &'ctx BindgenContext,
    // Which single trait this analysis instance is about.
    derive_trait: DeriveTrait,
    // The incremental result of this analysis's computation.
    // Contains information whether particular item can derive `derive_trait`
    can_derive: HashMap<ItemId, CanDerive>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `cannot_derive_partialeq_or_partialord` set, then each of the ids
    // in Vec<ItemId> need to be considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // can derive `derive_trait`.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
/// Signature shared by the per-trait edge filters used by this analysis.
type EdgePredicate = fn(EdgeKind) -> bool;

/// The default edge filter: keep exactly those IR edges that can influence
/// whether a type is able to derive a trait. Kept exhaustive on purpose so
/// that adding an `EdgeKind` variant forces a decision here.
fn consider_edge_default(kind: EdgeKind) -> bool {
    match kind {
        // Edges that never change the derive-ability of the source item.
        EdgeKind::Constructor |
        EdgeKind::Destructor |
        EdgeKind::FunctionReturn |
        EdgeKind::FunctionParameter |
        EdgeKind::InnerType |
        EdgeKind::InnerVar |
        EdgeKind::Method |
        EdgeKind::Generic => false,
        // Fields, bases, type references, variable types and template edges
        // all can change the answer.
        EdgeKind::BaseMember |
        EdgeKind::Field |
        EdgeKind::TypeReference |
        EdgeKind::VarType |
        EdgeKind::TemplateArgument |
        EdgeKind::TemplateDeclaration |
        EdgeKind::TemplateParameterDefinition => true,
    }
}
impl<'ctx> CannotDerive<'ctx> {
    /// Record a (possibly worse) derive verdict for `id`.
    ///
    /// `CanDerive::Yes` is the lattice bottom and is never stored; otherwise
    /// the stored value only ever moves upward (Yes < Manually < No), which is
    /// what keeps the fixpoint computation monotone.
    fn insert<Id: Into<ItemId>>(
        &mut self,
        id: Id,
        can_derive: CanDerive,
    ) -> ConstrainResult {
        let id = id.into();
        trace!(
            "inserting {:?} can_derive<{}>={:?}",
            id,
            self.derive_trait,
            can_derive
        );
        if let CanDerive::Yes = can_derive {
            return ConstrainResult::Same;
        }
        match self.can_derive.entry(id) {
            Entry::Occupied(mut entry) => {
                // Only report a change when the verdict strictly worsens.
                if *entry.get() < can_derive {
                    entry.insert(can_derive);
                    ConstrainResult::Changed
                } else {
                    ConstrainResult::Same
                }
            }
            Entry::Vacant(entry) => {
                entry.insert(can_derive);
                ConstrainResult::Changed
            }
        }
    }
    /// Compute the derive verdict for a single type, consulting the verdicts
    /// already recorded for the types it depends on.
    fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive {
        // Blocklisted types are not analyzed structurally; the user-provided
        // options decide whether they implement the trait.
        if !self.ctx.allowlisted_items().contains(&item.id()) {
            let can_derive = self
                .ctx
                .blocklisted_type_implements_trait(item, self.derive_trait);
            match can_derive {
                CanDerive::Yes => trace!(
                    " blocklisted type explicitly implements {}",
                    self.derive_trait
                ),
                CanDerive::Manually => trace!(
                    " blocklisted type requires manual implementation of {}",
                    self.derive_trait
                ),
                CanDerive::No => trace!(
                    " cannot derive {} for blocklisted type",
                    self.derive_trait
                ),
            }
            return can_derive;
        }
        // User opted this type out of the derive by name (e.g. --no-copy).
        if self.derive_trait.not_by_name(self.ctx, item) {
            trace!(
                " cannot derive {} for explicitly excluded type",
                self.derive_trait
            );
            return CanDerive::No;
        }
        trace!("ty: {:?}", ty);
        // Opaque types are judged purely by their layout, not their contents.
        if item.is_opaque(self.ctx, &()) {
            if !self.derive_trait.can_derive_union() &&
                ty.is_union() &&
                self.ctx.options().rust_features().untagged_union
            {
                trace!(
                    " cannot derive {} for Rust unions",
                    self.derive_trait
                );
                return CanDerive::No;
            }
            let layout_can_derive =
                ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
                    l.opaque().array_size_within_derive_limit(self.ctx)
                });
            match layout_can_derive {
                CanDerive::Yes => {
                    trace!(
                        " we can trivially derive {} for the layout",
                        self.derive_trait
                    );
                }
                _ => {
                    trace!(
                        " we cannot derive {} for the layout",
                        self.derive_trait
                    );
                }
            };
            return layout_can_derive;
        }
        match *ty.kind() {
            // Handle the simple cases. These can derive traits without further
            // information.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Complex(..) |
            TypeKind::Float(..) |
            TypeKind::Enum(..) |
            TypeKind::TypeParam |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::Reference(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel => {
                return self.derive_trait.can_derive_simple(ty.kind());
            }
            TypeKind::Pointer(inner) => {
                // Function pointers have their own rules; all other pointers
                // share a single per-trait verdict.
                let inner_type =
                    self.ctx.resolve_type(inner).canonical_type(self.ctx);
                if let TypeKind::Function(ref sig) = *inner_type.kind() {
                    self.derive_trait.can_derive_fnptr(sig)
                } else {
                    self.derive_trait.can_derive_pointer()
                }
            }
            TypeKind::Function(ref sig) => {
                self.derive_trait.can_derive_fnptr(sig)
            }
            // Complex cases need more information
            TypeKind::Array(t, len) => {
                // Missing entry defaults to `CanDerive::Yes` (lattice bottom);
                // a later pass will revisit us if the element worsens.
                let inner_type =
                    self.can_derive.get(&t.into()).cloned().unwrap_or_default();
                if inner_type != CanDerive::Yes {
                    trace!(
                        " arrays of T for which we cannot derive {} \
                         also cannot derive {}",
                        self.derive_trait,
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // `len == 0` means a C incomplete (flexible) array.
                if len == 0 && !self.derive_trait.can_derive_incomplete_array()
                {
                    trace!(
                        " cannot derive {} for incomplete arrays",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                if self.derive_trait.can_derive_large_array(self.ctx) {
                    trace!(" array can derive {}", self.derive_trait);
                    return CanDerive::Yes;
                }
                if len > RUST_DERIVE_IN_ARRAY_LIMIT {
                    trace!(
                        " array is too large to derive {}, but it may be implemented", self.derive_trait
                    );
                    return CanDerive::Manually;
                }
                trace!(
                    " array is small enough to derive {}",
                    self.derive_trait
                );
                CanDerive::Yes
            }
            TypeKind::Vector(t, len) => {
                let inner_type =
                    self.can_derive.get(&t.into()).cloned().unwrap_or_default();
                if inner_type != CanDerive::Yes {
                    trace!(
                        " vectors of T for which we cannot derive {} \
                         also cannot derive {}",
                        self.derive_trait,
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                assert_ne!(len, 0, "vectors cannot have zero length");
                self.derive_trait.can_derive_vector()
            }
            TypeKind::Comp(ref info) => {
                assert!(
                    !info.has_non_type_template_params(),
                    "The early ty.is_opaque check should have handled this case"
                );
                if !self.derive_trait.can_derive_compound_forward_decl() &&
                    info.is_forward_declaration()
                {
                    trace!(
                        " cannot derive {} for forward decls",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // NOTE: Take into account that while unions in C and C++ are copied by
                // default, the may have an explicit destructor in C++, so we can't
                // defer this check just for the union case.
                if !self.derive_trait.can_derive_compound_with_destructor() &&
                    self.ctx.lookup_has_destructor(
                        item.id().expect_type_id(self.ctx),
                    )
                {
                    trace!(
                        " comp has destructor which cannot derive {}",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                if info.kind() == CompKind::Union {
                    if self.derive_trait.can_derive_union() {
                        if self.ctx.options().rust_features().untagged_union &&
                           // https://github.com/rust-lang/rust/issues/36640
                           (!info.self_template_params(self.ctx).is_empty() ||
                            !item.all_template_params(self.ctx).is_empty())
                        {
                            trace!(
                                " cannot derive {} for Rust union because issue 36640", self.derive_trait
                            );
                            return CanDerive::No;
                        }
                    // fall through to be same as non-union handling
                    } else {
                        if self.ctx.options().rust_features().untagged_union {
                            trace!(
                                " cannot derive {} for Rust unions",
                                self.derive_trait
                            );
                            return CanDerive::No;
                        }
                        // Pre-untagged-union codegen represents unions as
                        // opaque blobs, so judge by layout instead.
                        let layout_can_derive =
                            ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
                                l.opaque()
                                    .array_size_within_derive_limit(self.ctx)
                            });
                        match layout_can_derive {
                            CanDerive::Yes => {
                                trace!(
                                    " union layout can trivially derive {}",
                                    self.derive_trait
                                );
                            }
                            _ => {
                                trace!(
                                    " union layout cannot derive {}",
                                    self.derive_trait
                                );
                            }
                        };
                        return layout_can_derive;
                    }
                }
                if !self.derive_trait.can_derive_compound_with_vtable() &&
                    item.has_vtable(self.ctx)
                {
                    trace!(
                        " cannot derive {} for comp with vtable",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // Bitfield units are always represented as arrays of u8, but
                // they're not traced as arrays, so we need to check here
                // instead.
                if !self.derive_trait.can_derive_large_array(self.ctx) &&
                    info.has_too_large_bitfield_unit() &&
                    !item.is_opaque(self.ctx, &())
                {
                    trace!(
                        " cannot derive {} for comp with too large bitfield unit",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                let pred = self.derive_trait.consider_edge_comp();
                self.constrain_join(item, pred)
            }
            TypeKind::ResolvedTypeRef(..) |
            TypeKind::TemplateAlias(..) |
            TypeKind::Alias(..) |
            TypeKind::BlockPointer(..) => {
                let pred = self.derive_trait.consider_edge_typeref();
                self.constrain_join(item, pred)
            }
            TypeKind::TemplateInstantiation(..) => {
                let pred = self.derive_trait.consider_edge_tmpl_inst();
                self.constrain_join(item, pred)
            }
            TypeKind::Opaque => unreachable!(
                "The early ty.is_opaque check should have handled this case"
            ),
        }
    }
    /// Join (`|=`) the verdicts of all of `item`'s relevant children, as
    /// selected by `consider_edge`. An item with no relevant children can
    /// trivially derive.
    fn constrain_join(
        &mut self,
        item: &Item,
        consider_edge: EdgePredicate,
    ) -> CanDerive {
        let mut candidate = None;
        item.trace(
            self.ctx,
            &mut |sub_id, edge_kind| {
                // Ignore ourselves, since union with ourself is a
                // no-op. Ignore edges that aren't relevant to the
                // analysis.
                if sub_id == item.id() || !consider_edge(edge_kind) {
                    return;
                }
                let can_derive = self.can_derive
                    .get(&sub_id)
                    .cloned()
                    .unwrap_or_default();
                match can_derive {
                    CanDerive::Yes => trace!(" member {:?} can derive {}", sub_id, self.derive_trait),
                    CanDerive::Manually => trace!(" member {:?} cannot derive {}, but it may be implemented", sub_id, self.derive_trait),
                    CanDerive::No => trace!(" member {:?} cannot derive {}", sub_id, self.derive_trait),
                }
                *candidate.get_or_insert(CanDerive::Yes) |= can_derive;
            },
            &(),
        );
        if candidate.is_none() {
            trace!(
                " can derive {} because there are no members",
                self.derive_trait
            );
        }
        candidate.unwrap_or_default()
    }
}
impl DeriveTrait {
    /// Whether the user explicitly opted `item` out of this derive by name.
    fn not_by_name(&self, ctx: &BindgenContext, item: &Item) -> bool {
        match self {
            DeriveTrait::Copy => ctx.no_copy_by_name(item),
            DeriveTrait::Debug => ctx.no_debug_by_name(item),
            DeriveTrait::Default => ctx.no_default_by_name(item),
            DeriveTrait::Hash => ctx.no_hash_by_name(item),
            DeriveTrait::PartialEqOrPartialOrd => {
                ctx.no_partialeq_by_name(item)
            }
        }
    }
    /// Edge filter used when joining over a compound type's children.
    fn consider_edge_comp(&self) -> EdgePredicate {
        match self {
            DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
            _ => |kind| matches!(kind, EdgeKind::BaseMember | EdgeKind::Field),
        }
    }
    /// Edge filter used for aliases/typerefs/block pointers.
    fn consider_edge_typeref(&self) -> EdgePredicate {
        match self {
            DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
            _ => |kind| kind == EdgeKind::TypeReference,
        }
    }
    /// Edge filter used for template instantiations.
    fn consider_edge_tmpl_inst(&self) -> EdgePredicate {
        match self {
            DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
            _ => |kind| {
                matches!(
                    kind,
                    EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration
                )
            },
        }
    }
    /// Whether arrays longer than `RUST_DERIVE_IN_ARRAY_LIMIT` are fine for
    /// this trait, given the target's `larger_arrays` support.
    fn can_derive_large_array(&self, ctx: &BindgenContext) -> bool {
        if ctx.options().rust_features().larger_arrays {
            !matches!(self, DeriveTrait::Default)
        } else {
            matches!(self, DeriveTrait::Copy)
        }
    }
    /// Only `Copy` can be derived for Rust `union`s.
    fn can_derive_union(&self) -> bool {
        matches!(self, DeriveTrait::Copy)
    }
    /// `Copy` is incompatible with types that have a C++ destructor.
    fn can_derive_compound_with_destructor(&self) -> bool {
        !matches!(self, DeriveTrait::Copy)
    }
    /// A vtable pointer has no sensible `Default`.
    fn can_derive_compound_with_vtable(&self) -> bool {
        !matches!(self, DeriveTrait::Default)
    }
    /// Forward declarations become empty types; only Copy/Debug make sense.
    fn can_derive_compound_forward_decl(&self) -> bool {
        matches!(self, DeriveTrait::Copy | DeriveTrait::Debug)
    }
    /// Whether incomplete (flexible) arrays are acceptable for this trait.
    fn can_derive_incomplete_array(&self) -> bool {
        !matches!(
            self,
            DeriveTrait::Copy |
                DeriveTrait::Hash |
                DeriveTrait::PartialEqOrPartialOrd
        )
    }
    /// Verdict for function pointers, which depends on both the trait and
    /// whether the signature is derive-friendly (e.g. arity limits).
    fn can_derive_fnptr(&self, f: &FunctionSig) -> CanDerive {
        match (self, f.function_pointers_can_derive()) {
            (DeriveTrait::Copy, _) | (DeriveTrait::Default, _) | (_, true) => {
                trace!(" function pointer can derive {}", self);
                CanDerive::Yes
            }
            (DeriveTrait::Debug, false) => {
                trace!(" function pointer cannot derive {}, but it may be implemented", self);
                CanDerive::Manually
            }
            (_, false) => {
                trace!(" function pointer cannot derive {}", self);
                CanDerive::No
            }
        }
    }
    /// Verdict for SIMD vector types.
    fn can_derive_vector(&self) -> CanDerive {
        match self {
            DeriveTrait::PartialEqOrPartialOrd => {
                // FIXME: vectors always can derive PartialEq, but they should
                // not derive PartialOrd:
                // https://github.com/rust-lang-nursery/packed_simd/issues/48
                trace!(" vectors cannot derive PartialOrd");
                CanDerive::No
            }
            _ => {
                trace!(" vector can derive {}", self);
                CanDerive::Yes
            }
        }
    }
    /// Verdict for raw (non-function) pointers.
    fn can_derive_pointer(&self) -> CanDerive {
        match self {
            DeriveTrait::Default => {
                trace!(" pointer cannot derive Default");
                CanDerive::No
            }
            _ => {
                trace!(" pointer can derive {}", self);
                CanDerive::Yes
            }
        }
    }
    /// Verdict for the "simple" type kinds that need no recursion.
    fn can_derive_simple(&self, kind: &TypeKind) -> CanDerive {
        match (self, kind) {
            // === Default ===
            (DeriveTrait::Default, TypeKind::Void) |
            (DeriveTrait::Default, TypeKind::NullPtr) |
            (DeriveTrait::Default, TypeKind::Enum(..)) |
            (DeriveTrait::Default, TypeKind::Reference(..)) |
            (DeriveTrait::Default, TypeKind::TypeParam) |
            (DeriveTrait::Default, TypeKind::ObjCInterface(..)) |
            (DeriveTrait::Default, TypeKind::ObjCId) |
            (DeriveTrait::Default, TypeKind::ObjCSel) => {
                trace!(" types that always cannot derive Default");
                CanDerive::No
            }
            (DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => {
                unreachable!(
                    "Type with unresolved type ref can't reach derive default"
                )
            }
            // === Hash ===
            (DeriveTrait::Hash, TypeKind::Float(..)) |
            (DeriveTrait::Hash, TypeKind::Complex(..)) => {
                trace!(" float cannot derive Hash");
                CanDerive::No
            }
            // === others ===
            _ => {
                trace!(" simple type that can always derive {}", self);
                CanDerive::Yes
            }
        }
    }
}
impl fmt::Display for DeriveTrait {
    /// Human-readable trait name, used in trace logging.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            DeriveTrait::Copy => "Copy",
            DeriveTrait::Debug => "Debug",
            DeriveTrait::Default => "Default",
            DeriveTrait::Hash => "Hash",
            DeriveTrait::PartialEqOrPartialOrd => "PartialEq/PartialOrd",
        };
        write!(f, "{}", name)
    }
}
impl<'ctx> MonotoneFramework for CannotDerive<'ctx> {
    type Node = ItemId;
    type Extra = (&'ctx BindgenContext, DeriveTrait);
    type Output = HashMap<ItemId, CanDerive>;
    /// Build an empty analysis for the given trait, precomputing the reverse
    /// dependency edges used to requeue items.
    fn new(
        (ctx, derive_trait): (&'ctx BindgenContext, DeriveTrait),
    ) -> CannotDerive<'ctx> {
        let can_derive = HashMap::default();
        let dependencies = generate_dependencies(ctx, consider_edge_default);
        CannotDerive {
            ctx,
            derive_trait,
            can_derive,
            dependencies,
        }
    }
    fn initial_worklist(&self) -> Vec<ItemId> {
        // The transitive closure of all allowlisted items, including explicitly
        // blocklisted items.
        self.ctx
            .allowlisted_items()
            .iter()
            .cloned()
            .flat_map(|i| {
                let mut reachable = vec![i];
                i.trace(
                    self.ctx,
                    &mut |s, _| {
                        reachable.push(s);
                    },
                    &(),
                );
                reachable
            })
            .collect()
    }
    /// One monotone step: recompute `id`'s verdict and record whether it
    /// changed (so dependents get revisited).
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain: {:?}", id);
        // `No` is the lattice top; the verdict cannot worsen further.
        if let Some(CanDerive::No) = self.can_derive.get(&id).cloned() {
            trace!(" already know it cannot derive {}", self.derive_trait);
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        let can_derive = match item.as_type() {
            Some(ty) => {
                let mut can_derive = self.constrain_type(item, ty);
                if let CanDerive::Yes = can_derive {
                    let is_reached_limit =
                        |l: Layout| l.align > RUST_DERIVE_IN_ARRAY_LIMIT;
                    if !self.derive_trait.can_derive_large_array(self.ctx) &&
                        ty.layout(self.ctx).map_or(false, is_reached_limit)
                    {
                        // We have to be conservative: the struct *could* have enough
                        // padding that we emit an array that is longer than
                        // `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations
                        // into the IR and computed them before this analysis, then we could
                        // be precise rather than conservative here.
                        can_derive = CanDerive::Manually;
                    }
                }
                can_derive
            }
            // Non-type items (modules, functions, ...) just join over their
            // children with the default edge filter.
            None => self.constrain_join(item, consider_edge_default),
        };
        self.insert(id, can_derive)
    }
    /// Visit every item whose verdict may be invalidated by a change to `id`.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {:?} into worklist", item);
                f(*item);
            }
        }
    }
}
impl<'ctx> From<CannotDerive<'ctx>> for HashMap<ItemId, CanDerive> {
    /// Consume the finished analysis, yielding its verdict map.
    fn from(analysis: CannotDerive<'ctx>) -> Self {
        // `CanDerive::Yes` entries are never stored (see `insert`), so the
        // map should only contain Manually/No verdicts.
        extra_assert!(analysis
            .can_derive
            .values()
            .all(|v| *v != CanDerive::Yes));
        analysis.can_derive
    }
}
/// Convert a `HashMap<ItemId, CanDerive>` into a `HashSet<ItemId>`.
///
/// Elements that are not `CanDerive::Yes` are kept in the set, so that it
/// represents all items that cannot derive.
pub fn as_cannot_derive_set(
    can_derive: HashMap<ItemId, CanDerive>,
) -> HashSet<ItemId> {
    can_derive
        .into_iter()
        .filter(|(_, can)| *can != CanDerive::Yes)
        .map(|(id, _)| id)
        .collect()
}

View file

@ -0,0 +1,176 @@
//! Determining which types have destructors
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::{CompKind, Field, FieldMethods};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has a destructor or not
///
/// We use the monotone function `has destructor`, defined as follows:
///
/// * If T is a type alias, a templated alias, or an indirection to another type,
///   T has a destructor if the type T refers to has a destructor.
/// * If T is a compound type, T has a destructor if we saw a destructor when parsing it,
///   or if it's a struct, T has a destructor if any of its base members has a destructor,
///   or if any of its fields have a destructor.
/// * If T is an instantiation of an abstract template definition, T has
///   a destructor if its template definition has a destructor,
///   or if any of the template arguments has a destructor.
/// * If T is the type of a field, that field has a destructor if it's not a bitfield,
///   and if T has a destructor.
#[derive(Debug, Clone)]
pub struct HasDestructorAnalysis<'ctx> {
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set definitely has a destructor.
    have_destructor: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `have_destructor` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has a destructor or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl<'ctx> HasDestructorAnalysis<'ctx> {
    /// Edge filter: only these IR edges can propagate the "has a destructor"
    /// property from one item to another.
    fn consider_edge(kind: EdgeKind) -> bool {
        match kind {
            EdgeKind::TypeReference |
            EdgeKind::BaseMember |
            EdgeKind::Field |
            EdgeKind::TemplateArgument |
            EdgeKind::TemplateDeclaration => true,
            _ => false,
        }
    }
    /// Record that `id` has a destructor; must be called at most once per id
    /// (callers bail out early when the fact is already known).
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        let newly_added = self.have_destructor.insert(id);
        assert!(
            newly_added,
            "We shouldn't try and insert {:?} twice because if it was \
             already in the set, `constrain` should have exited early.",
            id
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashSet<ItemId>;
    /// Build an empty analysis, precomputing the reverse dependency edges.
    fn new(ctx: &'ctx BindgenContext) -> Self {
        let have_destructor = HashSet::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasDestructorAnalysis {
            ctx,
            have_destructor,
            dependencies,
        }
    }
    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().cloned().collect()
    }
    /// One monotone step: decide whether `id` has a destructor based on facts
    /// already established for the items it depends on.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        if self.have_destructor.contains(&id) {
            // We've already computed that this type has a destructor and that can't
            // change.
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        let ty = match item.as_type() {
            // Non-type items can never have a destructor.
            None => return ConstrainResult::Same,
            Some(ty) => ty,
        };
        match *ty.kind() {
            // Aliases/typerefs simply forward the property of their target.
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::ResolvedTypeRef(t) => {
                if self.have_destructor.contains(&t.into()) {
                    self.insert(id)
                } else {
                    ConstrainResult::Same
                }
            }
            TypeKind::Comp(ref info) => {
                if info.has_own_destructor() {
                    return self.insert(id);
                }
                match info.kind() {
                    // Unions never run field destructors implicitly.
                    CompKind::Union => ConstrainResult::Same,
                    CompKind::Struct => {
                        // A struct also has a destructor if any base or any
                        // non-bitfield data member has one.
                        let base_or_field_destructor =
                            info.base_members().iter().any(|base| {
                                self.have_destructor.contains(&base.ty.into())
                            }) || info.fields().iter().any(
                                |field| match *field {
                                    Field::DataMember(ref data) => self
                                        .have_destructor
                                        .contains(&data.ty().into()),
                                    Field::Bitfields(_) => false,
                                },
                            );
                        if base_or_field_destructor {
                            self.insert(id)
                        } else {
                            ConstrainResult::Same
                        }
                    }
                }
            }
            TypeKind::TemplateInstantiation(ref inst) => {
                // Either the template definition or any argument having a
                // destructor gives the instantiation one.
                let definition_or_arg_destructor = self
                    .have_destructor
                    .contains(&inst.template_definition().into()) ||
                    inst.template_arguments().iter().any(|arg| {
                        self.have_destructor.contains(&arg.into())
                    });
                if definition_or_arg_destructor {
                    self.insert(id)
                } else {
                    ConstrainResult::Same
                }
            }
            _ => ConstrainResult::Same,
        }
    }
    /// Visit every item that may be affected by a change to `id`.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {:?} into worklist", item);
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasDestructorAnalysis<'ctx>> for HashSet<ItemId> {
    /// Consume the finished analysis, yielding the set of items known to have
    /// a destructor.
    fn from(analysis: HasDestructorAnalysis<'ctx>) -> Self {
        analysis.have_destructor
    }
}

View file

@ -0,0 +1,252 @@
//! Determining which types has float.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::Field;
use crate::ir::comp::FieldMethods;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has float or not.
///
/// We use the monotone constraint function `has_float`,
/// defined as follows:
///
/// * If T is float or complex float, T trivially has.
/// * If T is a type alias, a templated alias or an indirection to another type,
///   it has float if the type T refers to has.
/// * If T is a compound type, it has float if any of base member or field
///   has.
/// * If T is an instantiation of an abstract template definition, T has
///   float if any of the template arguments or template definition
///   has.
#[derive(Debug, Clone)]
pub struct HasFloat<'ctx> {
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set has float.
    has_float: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `has_float` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has float or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl<'ctx> HasFloat<'ctx> {
    /// Whether "has float" information can propagate along an IR edge of this
    /// kind from the referenced item back to the referencing item.
    fn consider_edge(kind: EdgeKind) -> bool {
        matches!(
            kind,
            EdgeKind::BaseMember |
                EdgeKind::Field |
                EdgeKind::TypeReference |
                EdgeKind::VarType |
                EdgeKind::TemplateArgument |
                EdgeKind::TemplateDeclaration |
                EdgeKind::TemplateParameterDefinition
        )
    }

    /// Mark `id` as having float and report that the analysis state changed.
    ///
    /// Panics if `id` was already present: `constrain` must exit early before
    /// ever re-inserting an item that is already in the set.
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        trace!("inserting {:?} into the has_float set", id);
        assert!(
            self.has_float.insert(id),
            "We shouldn't try and insert {:?} twice because if it was \
             already in the set, `constrain` should have exited early.",
            id
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasFloat<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashSet<ItemId>;

    fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> {
        let has_float = HashSet::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasFloat {
            ctx,
            has_float,
            dependencies,
        }
    }

    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().cloned().collect()
    }

    /// Monotone step: decide whether `id` has float, consulting only what we
    /// already know about the items it references.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain: {:?}", id);

        if self.has_float.contains(&id) {
            // Fixed log message: membership in the set means the item *does*
            // have float, and that answer can never change (monotone).
            trace!("    already know it has float");
            return ConstrainResult::Same;
        }

        let item = self.ctx.resolve_item(id);
        let ty = match item.as_type() {
            Some(ty) => ty,
            None => {
                trace!("    not a type; ignoring");
                return ConstrainResult::Same;
            }
        };

        match *ty.kind() {
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::TypeParam |
            TypeKind::Opaque |
            TypeKind::Pointer(..) |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel => {
                trace!("    simple type that does not have float");
                ConstrainResult::Same
            }

            TypeKind::Float(..) | TypeKind::Complex(..) => {
                trace!("    float type has float");
                self.insert(id)
            }

            TypeKind::Array(t, _) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        "    Array with type T that has float also has float"
                    );
                    return self.insert(id);
                }
                trace!("    Array with type T that does not have float also does not have float");
                ConstrainResult::Same
            }

            TypeKind::Vector(t, _) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        "    Vector with type T that has float also has float"
                    );
                    return self.insert(id);
                }
                trace!("    Vector with type T that does not have float also does not have float");
                ConstrainResult::Same
            }

            TypeKind::ResolvedTypeRef(t) |
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        "    aliases and type refs to T which have float \
                         also have float"
                    );
                    self.insert(id)
                } else {
                    // Fixed the garbled "floaarrayt" typo in this message.
                    trace!(
                        "    aliases and type refs to T which do not have float \
                         also do not have float"
                    );
                    ConstrainResult::Same
                }
            }

            TypeKind::Comp(ref info) => {
                // A compound type has float if any base or any field has it.
                let bases_have = info
                    .base_members()
                    .iter()
                    .any(|base| self.has_float.contains(&base.ty.into()));
                if bases_have {
                    trace!("    bases have float, so we also have");
                    return self.insert(id);
                }
                let fields_have = info.fields().iter().any(|f| match *f {
                    Field::DataMember(ref data) => {
                        self.has_float.contains(&data.ty().into())
                    }
                    Field::Bitfields(ref bfu) => bfu
                        .bitfields()
                        .iter()
                        .any(|b| self.has_float.contains(&b.ty().into())),
                });
                if fields_have {
                    trace!("    fields have float, so we also have");
                    return self.insert(id);
                }
                trace!("    comp doesn't have float");
                ConstrainResult::Same
            }

            TypeKind::TemplateInstantiation(ref template) => {
                // Fixed "insantiation" typos in the messages below.
                let args_have = template
                    .template_arguments()
                    .iter()
                    .any(|arg| self.has_float.contains(&arg.into()));
                if args_have {
                    trace!(
                        "    template args have float, so \
                         instantiation also has float"
                    );
                    return self.insert(id);
                }
                let def_has = self
                    .has_float
                    .contains(&template.template_definition().into());
                if def_has {
                    trace!(
                        "    template definition has float, so \
                         instantiation also has"
                    );
                    return self.insert(id);
                }
                trace!("    template instantiation does not have float");
                ConstrainResult::Same
            }
        }
    }

    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {:?} into worklist", item);
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasFloat<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasFloat<'ctx>) -> Self {
analysis.has_float
}
}

View file

@ -0,0 +1,252 @@
//! Determining which types has typed parameters in array.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::Field;
use crate::ir::comp::FieldMethods;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has array or not.
///
/// We use the monotone constraint function `has_type_parameter_in_array`,
/// defined as follows:
///
/// * If T is Array type with type parameter, T trivially has.
/// * If T is a type alias, a templated alias or an indirection to another type,
/// it has type parameter in array if the type T refers to has.
/// * If T is a compound type, it has array if any of base member or field
/// has type parameter in array.
/// * If T is an instantiation of an abstract template definition, T has
/// type parameter in array if any of the template arguments or template definition
/// has.
#[derive(Debug, Clone)]
pub struct HasTypeParameterInArray<'ctx> {
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set has array. Items are only ever added, never removed (monotone).
    has_type_parameter_in_array: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `has_type_parameter_in_array` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has array or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl<'ctx> HasTypeParameterInArray<'ctx> {
    /// Whether "type parameter in array" information can propagate along an
    /// IR edge of this kind.
    fn consider_edge(kind: EdgeKind) -> bool {
        // These are the only edges that can affect whether a type has type parameter
        // in array or not.
        matches!(
            kind,
            EdgeKind::BaseMember |
                EdgeKind::Field |
                EdgeKind::TypeReference |
                EdgeKind::VarType |
                EdgeKind::TemplateArgument |
                EdgeKind::TemplateDeclaration |
                EdgeKind::TemplateParameterDefinition
        )
    }

    /// Record that `id` has a type parameter in an array and report that the
    /// analysis state changed.
    ///
    /// Panics on double insertion; `constrain` guarantees it never happens.
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        trace!(
            "inserting {:?} into the has_type_parameter_in_array set",
            id
        );
        assert!(
            self.has_type_parameter_in_array.insert(id),
            "We shouldn't try and insert {:?} twice because if it was \
             already in the set, `constrain` should have exited early.",
            id
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> {
type Node = ItemId;
type Extra = &'ctx BindgenContext;
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> {
let has_type_parameter_in_array = HashSet::default();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasTypeParameterInArray {
ctx,
has_type_parameter_in_array,
dependencies,
}
}
fn initial_worklist(&self) -> Vec<ItemId> {
self.ctx.allowlisted_items().iter().cloned().collect()
}
fn constrain(&mut self, id: ItemId) -> ConstrainResult {
trace!("constrain: {:?}", id);
if self.has_type_parameter_in_array.contains(&id) {
trace!(" already know it do not have array");
return ConstrainResult::Same;
}
let item = self.ctx.resolve_item(id);
let ty = match item.as_type() {
Some(ty) => ty,
None => {
trace!(" not a type; ignoring");
return ConstrainResult::Same;
}
};
match *ty.kind() {
// Handle the simple cases. These cannot have array in type parameter
// without further information.
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Int(..) |
TypeKind::Float(..) |
TypeKind::Vector(..) |
TypeKind::Complex(..) |
TypeKind::Function(..) |
TypeKind::Enum(..) |
TypeKind::Reference(..) |
TypeKind::TypeParam |
TypeKind::Opaque |
TypeKind::Pointer(..) |
TypeKind::UnresolvedTypeRef(..) |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::ObjCSel => {
trace!(" simple type that do not have array");
ConstrainResult::Same
}
TypeKind::Array(t, _) => {
let inner_ty =
self.ctx.resolve_type(t).canonical_type(self.ctx);
match *inner_ty.kind() {
TypeKind::TypeParam => {
trace!(" Array with Named type has type parameter");
self.insert(id)
}
_ => {
trace!(
" Array without Named type does have type parameter"
);
ConstrainResult::Same
}
}
}
TypeKind::ResolvedTypeRef(t) |
TypeKind::TemplateAlias(t, _) |
TypeKind::Alias(t) |
TypeKind::BlockPointer(t) => {
if self.has_type_parameter_in_array.contains(&t.into()) {
trace!(
" aliases and type refs to T which have array \
also have array"
);
self.insert(id)
} else {
trace!(
" aliases and type refs to T which do not have array \
also do not have array"
);
ConstrainResult::Same
}
}
TypeKind::Comp(ref info) => {
let bases_have = info.base_members().iter().any(|base| {
self.has_type_parameter_in_array.contains(&base.ty.into())
});
if bases_have {
trace!(" bases have array, so we also have");
return self.insert(id);
}
let fields_have = info.fields().iter().any(|f| match *f {
Field::DataMember(ref data) => self
.has_type_parameter_in_array
.contains(&data.ty().into()),
Field::Bitfields(..) => false,
});
if fields_have {
trace!(" fields have array, so we also have");
return self.insert(id);
}
trace!(" comp doesn't have array");
ConstrainResult::Same
}
TypeKind::TemplateInstantiation(ref template) => {
let args_have =
template.template_arguments().iter().any(|arg| {
self.has_type_parameter_in_array.contains(&arg.into())
});
if args_have {
trace!(
" template args have array, so \
insantiation also has array"
);
return self.insert(id);
}
let def_has = self
.has_type_parameter_in_array
.contains(&template.template_definition().into());
if def_has {
trace!(
" template definition has array, so \
insantiation also has"
);
return self.insert(id);
}
trace!(" template instantiation do not have array");
ConstrainResult::Same
}
}
}
fn each_depending_on<F>(&self, id: ItemId, mut f: F)
where
F: FnMut(ItemId),
{
if let Some(edges) = self.dependencies.get(&id) {
for item in edges {
trace!("enqueue {:?} into worklist", item);
f(*item);
}
}
}
}
impl<'ctx> From<HasTypeParameterInArray<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasTypeParameterInArray<'ctx>) -> Self {
analysis.has_type_parameter_in_array
}
}

View file

@ -0,0 +1,240 @@
//! Determining which types has vtable
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{Entry, HashMap};
use std::cmp;
use std::ops;
/// The result of the `HasVtableAnalysis` for an individual item.
///
/// NOTE: the variants are declared in increasing order of "has-ness"; `join`
/// takes the `cmp::max` of two results, so the derived `Ord` (declaration
/// order) is load-bearing — do not reorder the variants.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum HasVtableResult {
    /// The item does not have a vtable pointer.
    No,
    /// The item has a vtable and the actual vtable pointer is within this item.
    SelfHasVtable,
    /// The item has a vtable, but the actual vtable pointer is in a base
    /// member.
    BaseHasVtable,
}
impl Default for HasVtableResult {
fn default() -> Self {
HasVtableResult::No
}
}
impl HasVtableResult {
/// Take the least upper bound of `self` and `rhs`.
pub fn join(self, rhs: Self) -> Self {
cmp::max(self, rhs)
}
}
impl ops::BitOr for HasVtableResult {
    type Output = Self;

    /// `|` is the lattice join.
    fn bitor(self, rhs: HasVtableResult) -> Self::Output {
        Self::join(self, rhs)
    }
}
impl ops::BitOrAssign for HasVtableResult {
    /// In-place lattice join.
    fn bitor_assign(&mut self, rhs: HasVtableResult) {
        *self = Self::join(*self, rhs)
    }
}
/// An analysis that finds for each IR item whether it has vtable or not
///
/// We use the monotone function `has vtable`, defined as follows:
///
/// * If T is a type alias, a templated alias, an indirection to another type,
/// or a reference of a type, T has vtable if the type T refers to has vtable.
/// * If T is a compound type, T has vtable if we saw a virtual function when
/// parsing it or any of its base members has vtable.
/// * If T is an instantiation of an abstract template definition, T has
/// vtable if the template definition has vtable
#[derive(Debug, Clone)]
pub struct HasVtableAnalysis<'ctx> {
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // map definitely has a vtable; the value records whether the vtable
    // pointer lives in the item itself or in a base (see `HasVtableResult`).
    // Absent entries implicitly mean `HasVtableResult::No`.
    have_vtable: HashMap<ItemId, HasVtableResult>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `have_vtable` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has a vtable or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl<'ctx> HasVtableAnalysis<'ctx> {
    /// Whether vtable information can propagate along an IR edge of this kind.
    fn consider_edge(kind: EdgeKind) -> bool {
        // These are the only edges that can affect whether a type has a
        // vtable or not. Written as an explicit match for symmetry with the
        // sibling analyses in this module.
        match kind {
            EdgeKind::TypeReference |
            EdgeKind::BaseMember |
            EdgeKind::TemplateDeclaration => true,
            _ => false,
        }
    }

    /// Record `result` for `id`, returning whether this improved what we know
    /// (i.e. moved `id` up the `HasVtableResult` lattice).
    fn insert<Id: Into<ItemId>>(
        &mut self,
        id: Id,
        result: HasVtableResult,
    ) -> ConstrainResult {
        // A missing entry already means "No", so storing `No` never changes
        // anything.
        if result == HasVtableResult::No {
            return ConstrainResult::Same;
        }

        let id = id.into();
        match self.have_vtable.entry(id) {
            Entry::Vacant(entry) => {
                entry.insert(result);
                ConstrainResult::Changed
            }
            Entry::Occupied(mut entry) if *entry.get() < result => {
                entry.insert(result);
                ConstrainResult::Changed
            }
            Entry::Occupied(_) => ConstrainResult::Same,
        }
    }

    /// Copy whatever we currently know about `from` over to `to`.
    fn forward<Id1, Id2>(&mut self, from: Id1, to: Id2) -> ConstrainResult
    where
        Id1: Into<ItemId>,
        Id2: Into<ItemId>,
    {
        let from = from.into();
        let to = to.into();
        match self.have_vtable.get(&from).copied() {
            Some(result) => self.insert(to, result),
            None => ConstrainResult::Same,
        }
    }
}
impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashMap<ItemId, HasVtableResult>;

    fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> {
        let have_vtable = HashMap::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasVtableAnalysis {
            ctx,
            have_vtable,
            dependencies,
        }
    }

    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().cloned().collect()
    }

    /// Monotone step: decide `id`'s `HasVtableResult` from what we already
    /// know about the items it references.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain {:?}", id);
        let item = self.ctx.resolve_item(id);
        let ty = match item.as_type() {
            None => return ConstrainResult::Same,
            Some(ty) => ty,
        };

        // TODO #851: figure out a way to handle deriving from template type parameters.
        match *ty.kind() {
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::Reference(t) => {
                trace!(
                    "    aliases and references forward to their inner type"
                );
                self.forward(t, id)
            }

            TypeKind::Comp(ref info) => {
                trace!("    comp considers its own methods and bases");
                let mut result = HasVtableResult::No;

                if info.has_own_virtual_method() {
                    trace!("    comp has its own virtual method");
                    result |= HasVtableResult::SelfHasVtable;
                }

                // Fix: only log a base once we know it actually has a vtable.
                // Previously the trace fired unconditionally for every base,
                // logging "has a base with a vtable" even when it did not.
                let bases_has_vtable = info.base_members().iter().any(|base| {
                    let base_has =
                        self.have_vtable.contains_key(&base.ty.into());
                    if base_has {
                        trace!(
                            "    comp has a base with a vtable: {:?}",
                            base
                        );
                    }
                    base_has
                });
                if bases_has_vtable {
                    result |= HasVtableResult::BaseHasVtable;
                }

                self.insert(id, result)
            }

            TypeKind::TemplateInstantiation(ref inst) => {
                self.forward(inst.template_definition(), id)
            }

            _ => ConstrainResult::Same,
        }
    }

    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {:?} into worklist", item);
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasVtableAnalysis<'ctx>> for HashMap<ItemId, HasVtableResult> {
    /// Extract the finished results from the analysis.
    fn from(finished: HasVtableAnalysis<'ctx>) -> Self {
        let results = finished.have_vtable;
        // We let the lack of an entry mean "No" to save space.
        extra_assert!(results.values().all(|v| *v != HasVtableResult::No));
        results
    }
}
/// A convenience trait for the things for which we might wonder if they have a
/// vtable during codegen.
///
/// This is not for _computing_ whether the thing has a vtable, it is for
/// looking up the results of the HasVtableAnalysis's computations for a
/// specific thing.
pub trait HasVtable {
    /// Return `true` if this thing has a vtable, `false` otherwise.
    fn has_vtable(&self, ctx: &BindgenContext) -> bool;
    /// Return `true` if this thing has an actual vtable pointer in itself, as
    /// opposed to transitively in a base member.
    fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool;
}

View file

@ -0,0 +1,402 @@
//! Fix-point analyses on the IR using the "monotone framework".
//!
//! A lattice is a set with a partial ordering between elements, where there is
//! a single least upper bound and a single greatest least bound for every
//! subset. We are dealing with finite lattices, which means that it has a
//! finite number of elements, and it follows that there exists a single top and
//! a single bottom member of the lattice. For example, the power set of a
//! finite set forms a finite lattice where partial ordering is defined by set
//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite
//! lattice constructed from the set {0,1,2}:
//!
//! ```text
//! .----- Top = {0,1,2} -----.
//! / | \
//! / | \
//! / | \
//! {0,1} -------. {0,2} .--------- {1,2}
//! | \ / \ / |
//! | / \ |
//! | / \ / \ |
//! {0} --------' {1} `---------- {2}
//! \ | /
//! \ | /
//! \ | /
//! `------ Bottom = {} ------'
//! ```
//!
//! A monotone function `f` is a function where if `x <= y`, then it holds that
//! `f(x) <= f(y)`. It should be clear that running a monotone function to a
//! fix-point on a finite lattice will always terminate: `f` can only "move"
//! along the lattice in a single direction, and therefore can only either find
//! a fix-point in the middle of the lattice or continue to the top or bottom
//! depending if it is ascending or descending the lattice respectively.
//!
//! For a deeper introduction to the general form of this kind of analysis, see
//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa].
//!
//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf
// Re-export individual analyses.
mod template_params;
pub use self::template_params::UsedTemplateParameters;
mod derive;
pub use self::derive::{as_cannot_derive_set, CannotDerive, DeriveTrait};
mod has_vtable;
pub use self::has_vtable::{HasVtable, HasVtableAnalysis, HasVtableResult};
mod has_destructor;
pub use self::has_destructor::HasDestructorAnalysis;
mod has_type_param_in_array;
pub use self::has_type_param_in_array::HasTypeParameterInArray;
mod has_float;
pub use self::has_float::HasFloat;
mod sizedness;
pub use self::sizedness::{Sizedness, SizednessAnalysis, SizednessResult};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::{EdgeKind, Trace};
use crate::HashMap;
use std::fmt;
use std::ops;
/// An analysis in the monotone framework.
///
/// Implementors of this trait must maintain the following two invariants:
///
/// 1. The concrete data must be a member of a finite-height lattice.
/// 2. The concrete `constrain` method must be monotone: that is,
/// if `x <= y`, then `constrain(x) <= constrain(y)`.
///
/// If these invariants do not hold, iteration to a fix-point might never
/// complete.
///
/// Implementations are driven to a fix-point by the `analyze` function in
/// this module.
///
/// For a simple example analysis, see the `ReachableFrom` type in the `tests`
/// module below.
pub trait MonotoneFramework: Sized + fmt::Debug {
    /// The type of node in our dependency graph.
    ///
    /// This is just generic (and not `ItemId`) so that we can easily unit test
    /// without constructing real `Item`s and their `ItemId`s.
    type Node: Copy;
    /// Any extra data that is needed during computation.
    ///
    /// Again, this is just generic (and not `&BindgenContext`) so that we can
    /// easily unit test without constructing real `BindgenContext`s full of
    /// real `Item`s and real `ItemId`s.
    type Extra: Sized;
    /// The final output of this analysis. Once we have reached a fix-point, we
    /// convert `self` into this type, and return it as the final result of the
    /// analysis.
    type Output: From<Self> + fmt::Debug;
    /// Construct a new instance of this analysis.
    fn new(extra: Self::Extra) -> Self;
    /// Get the initial set of nodes from which to start the analysis. Unless
    /// you are sure of some domain-specific knowledge, this should be the
    /// complete set of nodes.
    fn initial_worklist(&self) -> Vec<Self::Node>;
    /// Update the analysis for the given node.
    ///
    /// If this results in changing our internal state (ie, we discovered that
    /// we have not reached a fix-point and iteration should continue), return
    /// `ConstrainResult::Changed`. Otherwise, return `ConstrainResult::Same`.
    /// When `constrain` returns `ConstrainResult::Same` for all nodes in the
    /// set, we have reached a fix-point and the analysis is complete.
    fn constrain(&mut self, node: Self::Node) -> ConstrainResult;
    /// For each node `d` that depends on the given `node`'s current answer when
    /// running `constrain(d)`, call `f(d)`. This informs us which new nodes to
    /// queue up in the worklist when `constrain(node)` reports updated
    /// information.
    fn each_depending_on<F>(&self, node: Self::Node, f: F)
    where
        F: FnMut(Self::Node);
}
/// Whether an analysis's `constrain` function modified the incremental results
/// or not.
///
/// The default is `Same` (see the `Default` impl below): absent any new
/// information, a constrain step changes nothing.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ConstrainResult {
    /// The incremental results were updated, and the fix-point computation
    /// should continue.
    Changed,
    /// The incremental results were not updated.
    Same,
}
impl Default for ConstrainResult {
fn default() -> Self {
ConstrainResult::Same
}
}
impl ops::BitOr for ConstrainResult {
    type Output = Self;

    /// Combine two step results: the combination changed iff either side did.
    fn bitor(self, rhs: ConstrainResult) -> Self::Output {
        match (self, rhs) {
            (ConstrainResult::Same, ConstrainResult::Same) => {
                ConstrainResult::Same
            }
            _ => ConstrainResult::Changed,
        }
    }
}
impl ops::BitOrAssign for ConstrainResult {
fn bitor_assign(&mut self, rhs: ConstrainResult) {
*self = *self | rhs;
}
}
/// Run an analysis in the monotone framework.
///
/// Pumps the worklist to a fix-point: whenever constraining a node changes
/// the analysis state, every node that depends on it is re-queued. Finite
/// lattice height plus monotone `constrain` guarantees termination.
pub fn analyze<Analysis>(extra: Analysis::Extra) -> Analysis::Output
where
    Analysis: MonotoneFramework,
{
    let mut analysis = Analysis::new(extra);
    let mut worklist = analysis.initial_worklist();

    while let Some(node) = worklist.pop() {
        if analysis.constrain(node) == ConstrainResult::Changed {
            analysis.each_depending_on(node, |needs_work| {
                worklist.push(needs_work)
            });
        }
    }

    analysis.into()
}
/// Generate the dependency map for analysis.
///
/// For every allowlisted item, records which other allowlisted items must be
/// re-`constrain`ed when that item's answer changes; only edges for which
/// `consider_edge` returns `true` are included. Every allowlisted item gets
/// an entry, even if nothing depends on it.
pub fn generate_dependencies<F>(
    ctx: &BindgenContext,
    consider_edge: F,
) -> HashMap<ItemId, Vec<ItemId>>
where
    F: Fn(EdgeKind) -> bool,
{
    let mut dependencies = HashMap::default();

    for &item in ctx.allowlisted_items() {
        // Ensure the item has an entry even if it ends up with no dependents.
        dependencies.entry(item).or_default();

        // We reverse our natural IR graph edges to find dependencies
        // between nodes.
        item.trace(
            ctx,
            &mut |sub_item: ItemId, edge_kind| {
                if ctx.allowlisted_items().contains(&sub_item) &&
                    consider_edge(edge_kind)
                {
                    dependencies.entry(sub_item).or_default().push(item);
                }
            },
            &(),
        );
    }

    dependencies
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{HashMap, HashSet};

    // Here we find the set of nodes that are reachable from any given
    // node. This is a lattice mapping nodes to subsets of all nodes. Our join
    // function is set union.
    //
    // This is our test graph:
    //
    //     +---+                    +---+
    //     | 1 |               .----| 2 |
    //     +---+               |    +---+
    //       |                 |      ^
    //       |                 |      |
    //       |   +---+         '------'
    //       '-->|   |
    //           | 3 |
    //     .-----|   |-----.
    //     |     +---+     |
    //     |       ^       |
    //     v       |       v
    //   +---+     |     +---+    +---+
    //   | 4 |     |     | 5 |--->| 6 |
    //   +---+     |     +---+    +---+
    //     |       |       |        |
    //     |       |       |        v
    //     |     +---+     |      +---+
    //     '---->| 7 |<----'      | 8 |
    //           +---+            +---+
    //
    // And here is the mapping from a node to the set of nodes that are
    // reachable from it within the test graph:
    //
    //     1: {3,4,5,6,7,8}
    //     2: {2}
    //     3: {3,4,5,6,7,8}
    //     4: {3,4,5,6,7,8}
    //     5: {3,4,5,6,7,8}
    //     6: {8}
    //     7: {3,4,5,6,7,8}
    //     8: {}

    // A graph node, identified by a small integer.
    #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
    struct Node(usize);

    // A directed graph as an adjacency list: node -> nodes it points to.
    #[derive(Clone, Debug, Default, PartialEq, Eq)]
    struct Graph(HashMap<Node, Vec<Node>>);

    impl Graph {
        // Build the fixed 8-node test graph drawn above.
        fn make_test_graph() -> Graph {
            let mut g = Graph::default();
            g.0.insert(Node(1), vec![Node(3)]);
            g.0.insert(Node(2), vec![Node(2)]);
            g.0.insert(Node(3), vec![Node(4), Node(5)]);
            g.0.insert(Node(4), vec![Node(7)]);
            g.0.insert(Node(5), vec![Node(6), Node(7)]);
            g.0.insert(Node(6), vec![Node(8)]);
            g.0.insert(Node(7), vec![Node(3)]);
            g.0.insert(Node(8), vec![]);
            g
        }

        // Return a graph with every edge reversed; used to answer "who
        // depends on this node" in `each_depending_on`.
        fn reverse(&self) -> Graph {
            let mut reversed = Graph::default();
            for (node, edges) in self.0.iter() {
                // Keep every node present even if it has no incoming edges.
                reversed.0.entry(*node).or_insert_with(Vec::new);
                for referent in edges.iter() {
                    reversed
                        .0
                        .entry(*referent)
                        .or_insert_with(Vec::new)
                        .push(*node);
                }
            }
            reversed
        }
    }

    // The example analysis: maps each node to the set of nodes reachable
    // from it. See the module docs for the lattice being used.
    #[derive(Clone, Debug, PartialEq, Eq)]
    struct ReachableFrom<'a> {
        reachable: HashMap<Node, HashSet<Node>>,
        graph: &'a Graph,
        reversed: Graph,
    }

    impl<'a> MonotoneFramework for ReachableFrom<'a> {
        type Node = Node;
        type Extra = &'a Graph;
        type Output = HashMap<Node, HashSet<Node>>;

        fn new(graph: &'a Graph) -> ReachableFrom {
            let reversed = graph.reverse();
            ReachableFrom {
                reachable: Default::default(),
                graph,
                reversed,
            }
        }

        fn initial_worklist(&self) -> Vec<Node> {
            self.graph.0.keys().cloned().collect()
        }

        fn constrain(&mut self, node: Node) -> ConstrainResult {
            // The set of nodes reachable from a node `x` is
            //
            //     reachable(x) = s_0 U s_1 U ... U reachable(s_0) U reachable(s_1) U ...
            //
            // where there exist edges from `x` to each of `s_0, s_1, ...`.
            //
            // Yes, what follows is a **terribly** inefficient set union
            // implementation. Don't copy this code outside of this test!

            // Compare set size before/after to detect whether anything changed.
            let original_size = self
                .reachable
                .entry(node)
                .or_insert_with(HashSet::default)
                .len();
            for sub_node in self.graph.0[&node].iter() {
                self.reachable.get_mut(&node).unwrap().insert(*sub_node);
                // Clone the successor's set so we can mutate our own entry
                // while iterating (avoids aliasing two map entries at once).
                let sub_reachable = self
                    .reachable
                    .entry(*sub_node)
                    .or_insert_with(HashSet::default)
                    .clone();
                for transitive in sub_reachable {
                    self.reachable.get_mut(&node).unwrap().insert(transitive);
                }
            }
            let new_size = self.reachable[&node].len();
            if original_size != new_size {
                ConstrainResult::Changed
            } else {
                ConstrainResult::Same
            }
        }

        fn each_depending_on<F>(&self, node: Node, mut f: F)
        where
            F: FnMut(Node),
        {
            // Dependents are exactly the predecessors in the original graph,
            // i.e. the successors in the reversed graph.
            for dep in self.reversed.0[&node].iter() {
                f(*dep);
            }
        }
    }

    impl<'a> From<ReachableFrom<'a>> for HashMap<Node, HashSet<Node>> {
        fn from(reachable: ReachableFrom<'a>) -> Self {
            reachable.reachable
        }
    }

    #[test]
    fn monotone() {
        let g = Graph::make_test_graph();
        let reachable = analyze::<ReachableFrom>(&g);
        println!("reachable = {:#?}", reachable);

        // Helper: build a HashSet<Node> from a slice of node numbers.
        fn nodes<A>(nodes: A) -> HashSet<Node>
        where
            A: AsRef<[usize]>,
        {
            nodes.as_ref().iter().cloned().map(Node).collect()
        }

        let mut expected = HashMap::default();
        expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(2), nodes([2]));
        expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(4), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(5), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(6), nodes([8]));
        expected.insert(Node(7), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(8), nodes([]));
        println!("expected = {:#?}", expected);
        assert_eq!(reachable, expected);
    }
}

View file

@ -0,0 +1,361 @@
//! Determining the sizedness of types (as base classes and otherwise).
use super::{
generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework,
};
use crate::ir::context::{BindgenContext, TypeId};
use crate::ir::item::IsOpaque;
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{Entry, HashMap};
use std::{cmp, ops};
/// The result of the `Sizedness` analysis for an individual item.
///
/// This is a chain lattice of the form:
///
/// ```ignore
///                   NonZeroSized
///                        |
///                DependsOnTypeParam
///                        |
///                     ZeroSized
/// ```
///
/// We initially assume that all types are `ZeroSized` and then update our
/// understanding as we learn more about each type.
///
/// NOTE: the variants are declared in lattice order; `join` takes the
/// `cmp::max` of two results, so the derived `Ord` (declaration order) is
/// load-bearing — do not reorder the variants.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum SizednessResult {
    /// The type is zero-sized.
    ///
    /// This means that if it is a C++ type, and is not being used as a base
    /// member, then we must add an `_address` byte to enforce the
    /// unique-address-per-distinct-object-instance rule.
    ZeroSized,
    /// Whether this type is zero-sized or not depends on whether a type
    /// parameter is zero-sized or not.
    ///
    /// For example, given these definitions:
    ///
    /// ```c++
    /// template<class T>
    /// class Flongo : public T {};
    ///
    /// class Empty {};
    ///
    /// class NonEmpty { int x; };
    /// ```
    ///
    /// Then `Flongo<Empty>` is zero-sized, and needs an `_address` byte
    /// inserted, while `Flongo<NonEmpty>` is *not* zero-sized, and should *not*
    /// have an `_address` byte inserted.
    ///
    /// We don't properly handle this situation correctly right now:
    /// https://github.com/rust-lang/rust-bindgen/issues/586
    DependsOnTypeParam,
    /// Has some size that is known to be greater than zero. That doesn't mean
    /// it has a static size, but it is not zero sized for sure. In other words,
    /// it might contain an incomplete array or some other dynamically sized
    /// type.
    NonZeroSized,
}
impl Default for SizednessResult {
fn default() -> Self {
SizednessResult::ZeroSized
}
}
impl SizednessResult {
/// Take the least upper bound of `self` and `rhs`.
pub fn join(self, rhs: Self) -> Self {
cmp::max(self, rhs)
}
}
impl ops::BitOr for SizednessResult {
    type Output = Self;

    /// `|` is the lattice join.
    fn bitor(self, rhs: SizednessResult) -> Self::Output {
        Self::join(self, rhs)
    }
}
impl ops::BitOrAssign for SizednessResult {
    /// In-place lattice join.
    fn bitor_assign(&mut self, rhs: SizednessResult) {
        *self = Self::join(*self, rhs)
    }
}
/// An analysis that computes the sizedness of all types.
///
/// * For types with known sizes -- for example pointers, scalars, etc... --
/// they are assigned `NonZeroSized`.
///
/// * For compound structure types with one or more fields, they are assigned
/// `NonZeroSized`.
///
/// * For compound structure types without any fields, the results of the bases
/// are `join`ed.
///
/// * For type parameters, `DependsOnTypeParam` is assigned.
#[derive(Debug)]
pub struct SizednessAnalysis<'ctx> {
    ctx: &'ctx BindgenContext,
    // Reverse dependencies: when a key TypeId's answer improves, each TypeId
    // in the Vec must be re-`constrain`ed.
    dependencies: HashMap<TypeId, Vec<TypeId>>,
    // Incremental results of the analysis. Missing entries are implicitly
    // considered `ZeroSized`.
    sized: HashMap<TypeId, SizednessResult>,
}
impl<'ctx> SizednessAnalysis<'ctx> {
    /// Whether sizedness information can propagate along an IR edge of this
    /// kind.
    fn consider_edge(kind: EdgeKind) -> bool {
        // These are the only edges that can affect whether a type is
        // zero-sized or not. Written as an explicit match for symmetry with
        // the sibling analyses in this module.
        match kind {
            EdgeKind::TemplateArgument |
            EdgeKind::TemplateParameterDefinition |
            EdgeKind::TemplateDeclaration |
            EdgeKind::TypeReference |
            EdgeKind::BaseMember |
            EdgeKind::Field => true,
            _ => false,
        }
    }

    /// Insert an incremental result, and return whether this updated our
    /// knowledge of types and we should continue the analysis.
    fn insert(
        &mut self,
        id: TypeId,
        result: SizednessResult,
    ) -> ConstrainResult {
        trace!("inserting {:?} for {:?}", result, id);

        // A missing entry already means `ZeroSized`, so storing it would
        // never change the analysis state.
        if result == SizednessResult::ZeroSized {
            return ConstrainResult::Same;
        }

        match self.sized.entry(id) {
            Entry::Vacant(entry) => {
                entry.insert(result);
                ConstrainResult::Changed
            }
            Entry::Occupied(mut entry) if *entry.get() < result => {
                entry.insert(result);
                ConstrainResult::Changed
            }
            Entry::Occupied(_) => ConstrainResult::Same,
        }
    }

    /// Copy whatever we currently know about `from` over to `to`.
    fn forward(&mut self, from: TypeId, to: TypeId) -> ConstrainResult {
        match self.sized.get(&from).copied() {
            Some(result) => self.insert(to, result),
            None => ConstrainResult::Same,
        }
    }
}
impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> {
    type Node = TypeId;
    type Extra = &'ctx BindgenContext;
    type Output = HashMap<TypeId, SizednessResult>;

    fn new(ctx: &'ctx BindgenContext) -> SizednessAnalysis<'ctx> {
        // Reverse the relevant IR edges so that `dependencies[t]` lists the
        // types that must be re-constrained when `t`'s result grows. Items
        // that are not types are filtered out on both sides of the map.
        let dependencies = generate_dependencies(ctx, Self::consider_edge)
            .into_iter()
            .filter_map(|(id, sub_ids)| {
                id.as_type_id(ctx).map(|id| {
                    (
                        id,
                        sub_ids
                            .into_iter()
                            .filter_map(|s| s.as_type_id(ctx))
                            .collect::<Vec<_>>(),
                    )
                })
            })
            .collect();
        // Start at the lattice's bottom: an empty map means every type is
        // implicitly `ZeroSized`.
        let sized = HashMap::default();
        SizednessAnalysis {
            ctx,
            dependencies,
            sized,
        }
    }

    fn initial_worklist(&self) -> Vec<TypeId> {
        // Seed the worklist with every allowlisted item that is a type.
        self.ctx
            .allowlisted_items()
            .iter()
            .cloned()
            .filter_map(|id| id.as_type_id(self.ctx))
            .collect()
    }

    fn constrain(&mut self, id: TypeId) -> ConstrainResult {
        trace!("constrain {:?}", id);
        // `NonZeroSized` is the top of the lattice, so once a type reaches it
        // the result can never change again; skip the work.
        if let Some(SizednessResult::NonZeroSized) =
            self.sized.get(&id).cloned()
        {
            trace!(" already know it is not zero-sized");
            return ConstrainResult::Same;
        }
        if id.has_vtable_ptr(self.ctx) {
            trace!(" has an explicit vtable pointer, therefore is not zero-sized");
            return self.insert(id, SizednessResult::NonZeroSized);
        }
        let ty = self.ctx.resolve_type(id);
        if id.is_opaque(self.ctx, &()) {
            trace!(" type is opaque; checking layout...");
            // Opaque types are sized purely by their layout; a missing layout
            // is conservatively treated as zero-sized.
            let result =
                ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| {
                    if l.size == 0 {
                        trace!(" ...layout has size == 0");
                        SizednessResult::ZeroSized
                    } else {
                        trace!(" ...layout has size > 0");
                        SizednessResult::NonZeroSized
                    }
                });
            return self.insert(id, result);
        }
        match *ty.kind() {
            TypeKind::Void => {
                trace!(" void is zero-sized");
                self.insert(id, SizednessResult::ZeroSized)
            }
            TypeKind::TypeParam => {
                trace!(
                    " type params sizedness depends on what they're \
                     instantiated as"
                );
                self.insert(id, SizednessResult::DependsOnTypeParam)
            }
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Complex(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::NullPtr |
            TypeKind::ObjCId |
            TypeKind::ObjCSel |
            TypeKind::Pointer(..) => {
                trace!(" {:?} is known not to be zero-sized", ty.kind());
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::ObjCInterface(..) => {
                trace!(" obj-c interfaces always have at least the `isa` pointer");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) |
            TypeKind::ResolvedTypeRef(t) => {
                trace!(" aliases and type refs forward to their inner type");
                self.forward(t, id)
            }
            TypeKind::TemplateInstantiation(ref inst) => {
                trace!(
                    " template instantiations are zero-sized if their \
                     definition is zero-sized"
                );
                self.forward(inst.template_definition(), id)
            }
            TypeKind::Array(_, 0) => {
                trace!(" arrays of zero elements are zero-sized");
                self.insert(id, SizednessResult::ZeroSized)
            }
            TypeKind::Array(..) => {
                trace!(" arrays of > 0 elements are not zero-sized");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::Vector(..) => {
                trace!(" vectors are not zero-sized");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::Comp(ref info) => {
                trace!(" comp considers its own fields and bases");
                if !info.fields().is_empty() {
                    return self.insert(id, SizednessResult::NonZeroSized);
                }
                // No fields: join the known sizedness of all base classes.
                // Bases without an entry yet are implicitly `ZeroSized` and
                // simply skipped by `filter_map`.
                let result = info
                    .base_members()
                    .iter()
                    .filter_map(|base| self.sized.get(&base.ty))
                    .fold(SizednessResult::ZeroSized, |a, b| a.join(*b));
                self.insert(id, result)
            }
            TypeKind::Opaque => {
                unreachable!("covered by the .is_opaque() check above")
            }
            TypeKind::UnresolvedTypeRef(..) => {
                unreachable!("Should have been resolved after parsing!");
            }
        }
    }

    fn each_depending_on<F>(&self, id: TypeId, mut f: F)
    where
        F: FnMut(TypeId),
    {
        // Enqueue everything that depends on `id`, per the reversed edge map
        // built in `new`.
        if let Some(edges) = self.dependencies.get(&id) {
            for ty in edges {
                trace!("enqueue {:?} into worklist", ty);
                f(*ty);
            }
        }
    }
}
impl<'ctx> From<SizednessAnalysis<'ctx>> for HashMap<TypeId, SizednessResult> {
    fn from(analysis: SizednessAnalysis<'ctx>) -> Self {
        // A missing entry implicitly means `ZeroSized` (to save space), so no
        // explicit `ZeroSized` entry should ever have been stored.
        extra_assert!(!analysis
            .sized
            .values()
            .any(|v| *v == SizednessResult::ZeroSized));
        analysis.sized
    }
}
/// A convenience trait for querying whether some type or id is sized.
///
/// This does not *compute* sizedness; it merely looks up the result that the
/// sizedness analysis already computed for a specific thing.
pub trait Sizedness {
    /// Get the sizedness of this type.
    fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult;

    /// Is the sizedness for this type `SizednessResult::ZeroSized`?
    fn is_zero_sized(&self, ctx: &BindgenContext) -> bool {
        matches!(self.sizedness(ctx), SizednessResult::ZeroSized)
    }
}

View file

@ -0,0 +1,608 @@
//! Discover which template type parameters are actually used.
//!
//! ### Why do we care?
//!
//! C++ allows ignoring template parameters, while Rust does not. Usually we can
//! blindly stick a `PhantomData<T>` inside a generic Rust struct to make up for
//! this. That doesn't work for templated type aliases, however:
//!
//! ```C++
//! template <typename T>
//! using Fml = int;
//! ```
//!
//! If we generate the naive Rust code for this alias, we get:
//!
//! ```ignore
//! pub type Fml<T> = ::std::os::raw::int;
//! ```
//!
//! And this is rejected by `rustc` due to the unused type parameter.
//!
//! (Aside: in these simple cases, `libclang` will often just give us the
//! aliased type directly, and we will never even know we were dealing with
//! aliases, let alone templated aliases. It's the more convoluted scenarios
//! where we get to have some fun...)
//!
//! For such problematic template aliases, we could generate a tuple whose
//! second member is a `PhantomData<T>`. Or, if we wanted to go the extra mile,
//! we could even generate some smarter wrapper that implements `Deref`,
//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased
//! type. However, this is still lackluster:
//!
//! 1. Even with a billion conversion-trait implementations, using the generated
//! bindings is rather un-ergonomic.
//! 2. With either of these solutions, we need to keep track of which aliases
//! we've transformed like this in order to generate correct uses of the
//! wrapped type.
//!
//! Given that we have to properly track which template parameters ended up used
//! for (2), we might as well leverage that information to make ergonomic
//! bindings that don't contain any unused type parameters at all, and
//! completely avoid the pain of (1).
//!
//! ### How do we determine which template parameters are used?
//!
//! Determining which template parameters are actually used is a trickier
//! problem than it might seem at a glance. On the one hand, trivial uses are
//! easy to detect:
//!
//! ```C++
//! template <typename T>
//! class Foo {
//! T trivial_use_of_t;
//! };
//! ```
//!
//! It gets harder when determining if one template parameter is used depends on
//! determining if another template parameter is used. In this example, whether
//! `U` is used depends on whether `T` is used.
//!
//! ```C++
//! template <typename T>
//! class DoesntUseT {
//! int x;
//! };
//!
//! template <typename U>
//! class Fml {
//! DoesntUseT<U> lololol;
//! };
//! ```
//!
//! We can express the set of used template parameters as a constraint solving
//! problem (where the set of template parameters used by a given IR item is the
//! union of its sub-item's used template parameters) and iterate to a
//! fixed-point.
//!
//! We use the `ir::analysis::MonotoneFramework` infrastructure for this
//! fix-point analysis, where our lattice is the mapping from each IR item to
//! the powerset of the template parameters that appear in the input C++ header,
//! our join function is set union. The set of template parameters appearing in
//! the program is finite, as is the number of IR items. We start at our
//! lattice's bottom element: every item mapping to an empty set of template
//! parameters. Our analysis only adds members to each item's set of used
//! template parameters, never removes them, so it is monotone. Because our
//! lattice is finite and our constraint function is monotone, iteration to a
//! fix-point will terminate.
//!
//! See `src/ir/analysis.rs` for more.
use super::{ConstrainResult, MonotoneFramework};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::item::{Item, ItemSet};
use crate::ir::template::{TemplateInstantiation, TemplateParameters};
use crate::ir::traversal::{EdgeKind, Trace};
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item its set of template parameters that
/// it uses.
///
/// We use the monotone constraint function `template_param_usage`, defined as
/// follows:
///
/// * If `T` is a named template type parameter, it trivially uses itself:
///
/// ```ignore
/// template_param_usage(T) = { T }
/// ```
///
/// * If `inst` is a template instantiation, `inst.args` are the template
/// instantiation's template arguments, `inst.def` is the template definition
/// being instantiated, and `inst.def.params` is the template definition's
/// template parameters, then the instantiation's usage is the union of each
/// of its arguments' usages *if* the corresponding template parameter is in
/// turn used by the template definition:
///
/// ```ignore
/// template_param_usage(inst) = union(
///     template_param_usage(inst.args[i])
///     for i in 0..length(inst.args.length)
///     if inst.def.params[i] in template_param_usage(inst.def)
/// )
/// ```
///
/// * Finally, for all other IR item kinds, we use our lattice's `join`
/// operation: set union with each successor of the given item's template
/// parameter usage:
///
/// ```ignore
/// template_param_usage(v) =
///     union(template_param_usage(w) for w in successors(v))
/// ```
///
/// Note that we ignore certain edges in the graph, such as edges from a
/// template declaration to its template parameters' definitions for this
/// analysis. If we didn't, then we would mistakenly determine that every
/// template parameter is always used.
///
/// The final wrinkle is handling of blocklisted types. Normally, we say that
/// the set of allowlisted items is the transitive closure of items explicitly
/// called out for allowlisting, *without* any items explicitly called out as
/// blocklisted. However, for the purposes of this analysis's correctness, we
/// simplify and run the analysis on the full transitive closure of
/// allowlisted items. We do, however, treat instantiations of blocklisted items
/// specially; see `constrain_instantiation_of_blocklisted_template` and its
/// documentation for details.
#[derive(Debug, Clone)]
pub struct UsedTemplateParameters<'ctx> {
    ctx: &'ctx BindgenContext,
    // The Option is only there for temporary moves out of the hash map. See the
    // comments in `UsedTemplateParameters::constrain` below.
    used: HashMap<ItemId, Option<ItemSet>>,
    // Reversed IR edges: which items must be re-constrained when a given
    // item's used set grows (see `each_depending_on`).
    dependencies: HashMap<ItemId, Vec<ItemId>>,
    // The set of allowlisted items, without any blocklisted items reachable
    // from the allowlisted items which would otherwise be considered
    // allowlisted as well.
    allowlisted_items: HashSet<ItemId>,
}
impl<'ctx> UsedTemplateParameters<'ctx> {
    /// Which edge kinds propagate template parameter usage from the edge's
    /// referent back to the edge's origin?
    fn consider_edge(kind: EdgeKind) -> bool {
        match kind {
            // For each of these kinds of edges, if the referent uses a template
            // parameter, then it should be considered that the origin of the
            // edge also uses the template parameter.
            EdgeKind::TemplateArgument |
            EdgeKind::BaseMember |
            EdgeKind::Field |
            EdgeKind::Constructor |
            EdgeKind::Destructor |
            EdgeKind::VarType |
            EdgeKind::FunctionReturn |
            EdgeKind::FunctionParameter |
            EdgeKind::TypeReference => true,
            // An inner var or type using a template parameter is orthogonal
            // from whether we use it. See template-param-usage-{6,11}.hpp.
            EdgeKind::InnerVar | EdgeKind::InnerType => false,
            // We can't emit machine code for new monomorphizations of class
            // templates' methods (and don't detect explicit instantiations) so
            // we must ignore template parameters that are only used by
            // methods. This doesn't apply to a function type's return or
            // parameter types, however, because of type aliases of function
            // pointers that use template parameters, eg
            // tests/headers/struct_with_typedef_template_arg.hpp
            EdgeKind::Method => false,
            // If we considered these edges, we would end up mistakenly claiming
            // that every template parameter is always used.
            EdgeKind::TemplateDeclaration |
            EdgeKind::TemplateParameterDefinition => false,
            // Since we have to be careful about which edges we consider for
            // this analysis to be correct, we ignore generic edges. We also
            // avoid a `_` wild card to force authors of new edge kinds to
            // determine whether they need to be considered by this analysis.
            EdgeKind::Generic => false,
        }
    }

    /// Temporarily move `this_id`'s usage set out of the `used` map, leaving
    /// `None` behind. `constrain` must re-insert it before returning to
    /// restore the invariant that all entries are `Some`.
    fn take_this_id_usage_set<Id: Into<ItemId>>(
        &mut self,
        this_id: Id,
    ) -> ItemSet {
        let this_id = this_id.into();
        self.used
            .get_mut(&this_id)
            .expect(
                "Should have a set of used template params for every item \
                 id",
            )
            .take()
            .expect(
                "Should maintain the invariant that all used template param \
                 sets are `Some` upon entry of `constrain`",
            )
    }

    /// We say that blocklisted items use all of their template parameters. The
    /// blocklisted type is most likely implemented explicitly by the user,
    /// since it won't be in the generated bindings, and we don't know exactly
    /// what they'll do with template parameters, but we can push the issue down
    /// the line to them.
    fn constrain_instantiation_of_blocklisted_template(
        &self,
        this_id: ItemId,
        used_by_this_id: &mut ItemSet,
        instantiation: &TemplateInstantiation,
    ) {
        trace!(
            " instantiation of blocklisted template, uses all template \
             arguments"
        );
        let args = instantiation
            .template_arguments()
            .iter()
            .map(|a| {
                // Resolve through type refs and aliases so we union the
                // canonical item's usage set.
                a.into_resolver()
                    .through_type_refs()
                    .through_type_aliases()
                    .resolve(self.ctx)
                    .id()
            })
            // Skip self-references: this_id's own entry is currently `None`
            // because `constrain` has taken it out of the map.
            .filter(|a| *a != this_id)
            .flat_map(|a| {
                self.used
                    .get(&a)
                    .expect("Should have a used entry for the template arg")
                    .as_ref()
                    .expect(
                        "Because a != this_id, and all used template \
                         param sets other than this_id's are `Some`, \
                         a's used template param set should be `Some`",
                    )
                    .iter()
                    .cloned()
            });
        used_by_this_id.extend(args);
    }

    /// A template instantiation's concrete template argument is only used if
    /// the template definition uses the corresponding template parameter.
    fn constrain_instantiation(
        &self,
        this_id: ItemId,
        used_by_this_id: &mut ItemSet,
        instantiation: &TemplateInstantiation,
    ) {
        trace!(" template instantiation");
        let decl = self.ctx.resolve_type(instantiation.template_definition());
        let args = instantiation.template_arguments();
        let params = decl.self_template_params(self.ctx);
        debug_assert!(this_id != instantiation.template_definition());
        let used_by_def = self.used
            .get(&instantiation.template_definition().into())
            .expect("Should have a used entry for instantiation's template definition")
            .as_ref()
            .expect("And it should be Some because only this_id's set is None, and an \
                     instantiation's template definition should never be the \
                     instantiation itself");
        for (arg, param) in args.iter().zip(params.iter()) {
            trace!(
                " instantiation's argument {:?} is used if definition's \
                 parameter {:?} is used",
                arg,
                param
            );
            if used_by_def.contains(&param.into()) {
                trace!(" param is used by template definition");
                // Canonicalize the argument before looking up its usage set.
                let arg = arg
                    .into_resolver()
                    .through_type_refs()
                    .through_type_aliases()
                    .resolve(self.ctx)
                    .id();
                // Self-reference: this_id's own entry is `None` right now.
                if arg == this_id {
                    continue;
                }
                let used_by_arg = self
                    .used
                    .get(&arg)
                    .expect("Should have a used entry for the template arg")
                    .as_ref()
                    .expect(
                        "Because arg != this_id, and all used template \
                         param sets other than this_id's are `Some`, \
                         arg's used template param set should be \
                         `Some`",
                    )
                    .iter()
                    .cloned();
                used_by_this_id.extend(used_by_arg);
            }
        }
    }

    /// The join operation on our lattice: the set union of all of this id's
    /// successors.
    fn constrain_join(&self, used_by_this_id: &mut ItemSet, item: &Item) {
        trace!(" other item: join with successors' usage");
        item.trace(
            self.ctx,
            &mut |sub_id, edge_kind| {
                // Ignore ourselves, since union with ourself is a
                // no-op. Ignore edges that aren't relevant to the
                // analysis.
                if sub_id == item.id() || !Self::consider_edge(edge_kind) {
                    return;
                }
                let used_by_sub_id = self
                    .used
                    .get(&sub_id)
                    .expect("Should have a used set for the sub_id successor")
                    .as_ref()
                    .expect(
                        "Because sub_id != id, and all used template \
                         param sets other than id's are `Some`, \
                         sub_id's used template param set should be \
                         `Some`",
                    )
                    .iter()
                    .cloned();
                trace!(
                    " union with {:?}'s usage: {:?}",
                    sub_id,
                    used_by_sub_id.clone().collect::<Vec<_>>()
                );
                used_by_this_id.extend(used_by_sub_id);
            },
            &(),
        );
    }
}
impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashMap<ItemId, ItemSet>;

    fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> {
        let mut used = HashMap::default();
        let mut dependencies = HashMap::default();
        let allowlisted_items: HashSet<_> =
            ctx.allowlisted_items().iter().cloned().collect();
        // The analysis runs over the full transitive closure of the
        // allowlisted items, i.e. including blocklisted items reachable from
        // them (see the type-level docs for why).
        let allowlisted_and_blocklisted_items: ItemSet = allowlisted_items
            .iter()
            .cloned()
            .flat_map(|i| {
                let mut reachable = vec![i];
                i.trace(
                    ctx,
                    &mut |s, _| {
                        reachable.push(s);
                    },
                    &(),
                );
                reachable
            })
            .collect();
        for item in allowlisted_and_blocklisted_items {
            dependencies.entry(item).or_insert_with(Vec::new);
            used.entry(item).or_insert_with(|| Some(ItemSet::new()));
            {
                // We reverse our natural IR graph edges to find dependencies
                // between nodes.
                item.trace(
                    ctx,
                    &mut |sub_item: ItemId, _| {
                        used.entry(sub_item)
                            .or_insert_with(|| Some(ItemSet::new()));
                        dependencies
                            .entry(sub_item)
                            .or_insert_with(Vec::new)
                            .push(item);
                    },
                    &(),
                );
            }
            // Additionally, whether a template instantiation's template
            // arguments are used depends on whether the template declaration's
            // generic template parameters are used.
            let item_kind =
                ctx.resolve_item(item).as_type().map(|ty| ty.kind());
            if let Some(&TypeKind::TemplateInstantiation(ref inst)) = item_kind
            {
                let decl = ctx.resolve_type(inst.template_definition());
                let args = inst.template_arguments();
                // Although template definitions should always have
                // template parameters, there is a single exception:
                // opaque templates. `zip` below simply stops pairing once
                // the shorter side runs out, which covers that case.
                // NOTE(review): a previous comment here said "Hence the
                // unwrap_or", but there is no `unwrap_or` in this code --
                // the comment was stale.
                let params = decl.self_template_params(ctx);
                for (arg, param) in args.iter().zip(params.iter()) {
                    // Canonicalize both sides so the `used`/`dependencies`
                    // maps are keyed on resolved items.
                    let arg = arg
                        .into_resolver()
                        .through_type_aliases()
                        .through_type_refs()
                        .resolve(ctx)
                        .id();
                    let param = param
                        .into_resolver()
                        .through_type_aliases()
                        .through_type_refs()
                        .resolve(ctx)
                        .id();
                    used.entry(arg).or_insert_with(|| Some(ItemSet::new()));
                    used.entry(param).or_insert_with(|| Some(ItemSet::new()));
                    dependencies
                        .entry(arg)
                        .or_insert_with(Vec::new)
                        .push(param);
                }
            }
        }
        if cfg!(feature = "testing_only_extra_assertions") {
            // Invariant: The `used` map has an entry for every allowlisted
            // item, as well as all explicitly blocklisted items that are
            // reachable from allowlisted items.
            //
            // Invariant: the `dependencies` map has an entry for every
            // allowlisted item.
            //
            // (This is so that every item we call `constrain` on is guaranteed
            // to have a set of template parameters, and we can allow
            // blocklisted templates to use all of their parameters).
            for item in allowlisted_items.iter() {
                extra_assert!(used.contains_key(item));
                extra_assert!(dependencies.contains_key(item));
                item.trace(
                    ctx,
                    &mut |sub_item, _| {
                        extra_assert!(used.contains_key(&sub_item));
                        extra_assert!(dependencies.contains_key(&sub_item));
                    },
                    &(),
                )
            }
        }
        UsedTemplateParameters {
            ctx,
            used,
            dependencies,
            allowlisted_items,
        }
    }

    fn initial_worklist(&self) -> Vec<ItemId> {
        // The transitive closure of all allowlisted items, including explicitly
        // blocklisted items.
        self.ctx
            .allowlisted_items()
            .iter()
            .cloned()
            .flat_map(|i| {
                let mut reachable = vec![i];
                i.trace(
                    self.ctx,
                    &mut |s, _| {
                        reachable.push(s);
                    },
                    &(),
                );
                reachable
            })
            .collect()
    }

    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        // Invariant: all hash map entries' values are `Some` upon entering and
        // exiting this method.
        extra_assert!(self.used.values().all(|v| v.is_some()));
        // Take the set for this id out of the hash map while we mutate it based
        // on other hash map entries. We *must* put it back into the hash map at
        // the end of this method. This allows us to side-step HashMap's lack of
        // an analog to slice::split_at_mut.
        let mut used_by_this_id = self.take_this_id_usage_set(id);
        trace!("constrain {:?}", id);
        trace!(" initially, used set is {:?}", used_by_this_id);
        let original_len = used_by_this_id.len();
        let item = self.ctx.resolve_item(id);
        let ty_kind = item.as_type().map(|ty| ty.kind());
        match ty_kind {
            // Named template type parameters trivially use themselves.
            Some(&TypeKind::TypeParam) => {
                trace!(" named type, trivially uses itself");
                used_by_this_id.insert(id);
            }
            // Template instantiations only use their template arguments if the
            // template definition uses the corresponding template parameter.
            Some(&TypeKind::TemplateInstantiation(ref inst)) => {
                if self
                    .allowlisted_items
                    .contains(&inst.template_definition().into())
                {
                    self.constrain_instantiation(
                        id,
                        &mut used_by_this_id,
                        inst,
                    );
                } else {
                    self.constrain_instantiation_of_blocklisted_template(
                        id,
                        &mut used_by_this_id,
                        inst,
                    );
                }
            }
            // Otherwise, add the union of each of its referent item's template
            // parameter usage.
            _ => self.constrain_join(&mut used_by_this_id, item),
        }
        trace!(" finally, used set is {:?}", used_by_this_id);
        let new_len = used_by_this_id.len();
        assert!(
            new_len >= original_len,
            "This is the property that ensures this function is monotone -- \
             if it doesn't hold, the analysis might never terminate!"
        );
        // Put the set back in the hash map and restore our invariant.
        debug_assert!(self.used[&id].is_none());
        self.used.insert(id, Some(used_by_this_id));
        extra_assert!(self.used.values().all(|v| v.is_some()));
        if new_len != original_len {
            ConstrainResult::Changed
        } else {
            ConstrainResult::Same
        }
    }

    fn each_depending_on<F>(&self, item: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        // `dependencies` holds the reversed IR edges built in `new`.
        if let Some(edges) = self.dependencies.get(&item) {
            for item in edges {
                trace!("enqueue {:?} into worklist", item);
                f(*item);
            }
        }
    }
}
impl<'ctx> From<UsedTemplateParameters<'ctx>> for HashMap<ItemId, ItemSet> {
fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self {
used_templ_params
.used
.into_iter()
.map(|(k, v)| (k, v.unwrap()))
.collect()
}
}

View file

@ -0,0 +1,211 @@
//! Types and functions related to bindgen annotation comments.
//!
//! Users can add annotations in doc comments to types that they would like to
//! replace other types with, mark as opaque, etc. This module deals with all of
//! that stuff.
use crate::clang;
/// What kind of accessor should we provide for a field?
#[derive(Copy, PartialEq, Eq, Clone, Debug)]
pub enum FieldAccessorKind {
    /// No accessor at all.
    None,
    /// Plain accessor.
    Regular,
    /// Unsafe accessor.
    Unsafe,
    /// Immutable accessor.
    Immutable,
}
/// Annotations for a given item, or a field.
///
/// You can see the kind of comments that are accepted in the Doxygen
/// documentation:
///
/// https://www.doxygen.nl/manual/docblocks.html
#[derive(Default, Clone, PartialEq, Eq, Debug)]
pub struct Annotations {
    /// Whether this item is marked as opaque. Only applies to types.
    opaque: bool,
    /// Whether this item should be hidden from the output. Only applies to
    /// types, or enum variants.
    hide: bool,
    /// Whether this type should be replaced by another. The name is a
    /// namespace-aware path.
    use_instead_of: Option<Vec<String>>,
    /// Manually disable deriving copy/clone on this type. Only applies to
    /// struct or union types.
    disallow_copy: bool,
    /// Manually disable deriving debug on this type.
    disallow_debug: bool,
    /// Manually disable deriving/implement default on this type.
    disallow_default: bool,
    /// Whether to add a #[must_use] annotation to this type.
    must_use_type: bool,
    /// Whether fields should be marked as private or not. You can set this on
    /// structs (it will apply to all the fields), or individual fields.
    private_fields: Option<bool>,
    /// The kind of accessor this field will have. Also can be applied to
    /// structs so all the fields inside share it by default.
    accessor_kind: Option<FieldAccessorKind>,
    /// Whether this enum variant should be constified.
    ///
    /// This is controlled by the `constant` attribute, this way:
    ///
    /// ```cpp
    /// enum Foo {
    ///     Bar = 0, /**< <div rustbindgen constant></div> */
    ///     Baz = 0,
    /// };
    /// ```
    ///
    /// In that case, bindgen will generate a constant for `Bar` instead of
    /// `Baz`.
    constify_enum_variant: bool,
    /// List of explicit derives for this type.
    derives: Vec<String>,
}
/// Map the textual value of an `accessor` annotation attribute to a
/// `FieldAccessorKind`. Any unrecognized value means a plain accessor.
fn parse_accessor(s: &str) -> FieldAccessorKind {
    if s == "false" {
        FieldAccessorKind::None
    } else if s == "unsafe" {
        FieldAccessorKind::Unsafe
    } else if s == "immutable" {
        FieldAccessorKind::Immutable
    } else {
        FieldAccessorKind::Regular
    }
}
impl Annotations {
    /// Construct new annotations for the given cursor and its bindgen comments
    /// (if any).
    ///
    /// Returns `None` when the cursor's comment contained no `rustbindgen`
    /// annotation tags at all.
    pub fn new(cursor: &clang::Cursor) -> Option<Annotations> {
        let mut anno = Annotations::default();
        let mut matched_one = false;
        anno.parse(&cursor.comment(), &mut matched_one);
        if matched_one {
            Some(anno)
        } else {
            None
        }
    }

    /// Should this type be hidden?
    pub fn hide(&self) -> bool {
        self.hide
    }

    /// Should this type be opaque?
    pub fn opaque(&self) -> bool {
        self.opaque
    }

    /// For a given type, indicates the type it should replace.
    ///
    /// For example, in the following code:
    ///
    /// ```cpp
    ///
    /// /** <div rustbindgen replaces="Bar"></div> */
    /// struct Foo { int x; };
    ///
    /// struct Bar { char foo; };
    /// ```
    ///
    /// the generated code would look something like:
    ///
    /// ```
    /// /** <div rustbindgen replaces="Bar"></div> */
    /// struct Bar {
    ///     x: ::std::os::raw::c_int,
    /// };
    /// ```
    ///
    /// That is, code for `Foo` is used to generate `Bar`.
    pub fn use_instead_of(&self) -> Option<&[String]> {
        self.use_instead_of.as_deref()
    }

    /// The list of derives that have been specified in this annotation.
    pub fn derives(&self) -> &[String] {
        &self.derives
    }

    /// Should we avoid implementing the `Copy` trait?
    pub fn disallow_copy(&self) -> bool {
        self.disallow_copy
    }

    /// Should we avoid implementing the `Debug` trait?
    pub fn disallow_debug(&self) -> bool {
        self.disallow_debug
    }

    /// Should we avoid implementing the `Default` trait?
    pub fn disallow_default(&self) -> bool {
        self.disallow_default
    }

    /// Should this type get a `#[must_use]` annotation?
    pub fn must_use_type(&self) -> bool {
        self.must_use_type
    }

    /// Should the fields be private?
    pub fn private_fields(&self) -> Option<bool> {
        self.private_fields
    }

    /// What kind of accessors should we provide for this type's fields?
    pub fn accessor_kind(&self) -> Option<FieldAccessorKind> {
        self.accessor_kind
    }

    /// Recursively scan `comment` and its children for
    /// `<div rustbindgen ...>` tags, recording every recognized attribute on
    /// `self`. Sets `*matched` to true if at least one such tag was seen.
    fn parse(&mut self, comment: &clang::Comment, matched: &mut bool) {
        use clang_sys::CXComment_HTMLStartTag;
        if comment.kind() == CXComment_HTMLStartTag &&
            comment.get_tag_name() == "div" &&
            comment
                .get_tag_attrs()
                .next()
                .map_or(false, |attr| attr.name == "rustbindgen")
        {
            *matched = true;
            for attr in comment.get_tag_attrs() {
                match attr.name.as_str() {
                    "opaque" => self.opaque = true,
                    "hide" => self.hide = true,
                    "nocopy" => self.disallow_copy = true,
                    "nodebug" => self.disallow_debug = true,
                    "nodefault" => self.disallow_default = true,
                    "mustusetype" => self.must_use_type = true,
                    "replaces" => {
                        self.use_instead_of = Some(
                            attr.value.split("::").map(Into::into).collect(),
                        )
                    }
                    "derive" => self.derives.push(attr.value),
                    "private" => {
                        self.private_fields = Some(attr.value != "false")
                    }
                    "accessor" => {
                        self.accessor_kind = Some(parse_accessor(&attr.value))
                    }
                    "constant" => self.constify_enum_variant = true,
                    _ => {}
                }
            }
        }
        for child in comment.get_children() {
            self.parse(&child, matched);
        }
    }

    /// Returns whether we've parsed a "constant" attribute.
    pub fn constify_enum_variant(&self) -> bool {
        self.constify_enum_variant
    }
}

100
third-party/vendor/bindgen/ir/comment.rs vendored Normal file
View file

@ -0,0 +1,100 @@
//! Utilities for manipulating C/C++ comments.
/// The type of a comment.
#[derive(Debug, PartialEq, Eq)]
enum Kind {
    /// A `///` comment, or something of the like (e.g. `//`).
    /// All lines in a comment should start with the same symbol.
    SingleLines,
    /// A `/**` comment, where each other line can start with `*` and the
    /// entire block ends with `*/`.
    MultiLine,
}
/// Preprocesses a C/C++ comment so that it is a valid Rust comment.
///
/// Text that is not recognized as a doc comment is returned unchanged.
pub fn preprocess(comment: &str) -> String {
    match self::kind(comment) {
        Some(Kind::MultiLine) => preprocess_multi_line(comment),
        Some(Kind::SingleLines) => preprocess_single_lines(comment),
        None => comment.to_owned(),
    }
}
/// Classify `comment` as a single-line or multi-line doc comment, if it is
/// a doc comment at all.
fn kind(comment: &str) -> Option<Kind> {
    if comment.starts_with("/*") {
        return Some(Kind::MultiLine);
    }
    if comment.starts_with("//") {
        return Some(Kind::SingleLines);
    }
    None
}
/// Preprocesses multiple single line comments.
///
/// Handles lines starting with both `//` and `///`: each line is trimmed and
/// stripped of its leading slashes, then the lines are re-joined.
fn preprocess_single_lines(comment: &str) -> String {
    debug_assert!(comment.starts_with("//"), "comment is not single line");
    comment
        .lines()
        .map(|line| line.trim().trim_start_matches('/'))
        .collect::<Vec<_>>()
        .join("\n")
}
/// Preprocesses a multi-line (`/** ... */`) comment into plain text.
fn preprocess_multi_line(comment: &str) -> String {
    // Peel off the delimiters: the leading `/`s, then the trailing `/` and
    // `*`s.
    let body = comment
        .trim_start_matches('/')
        .trim_end_matches('/')
        .trim_end_matches('*');
    // Strip any potential `*` (or `!`) characters preceding each line, and
    // drop blank lines at the start of the comment.
    let mut lines: Vec<_> = body
        .lines()
        .map(|line| line.trim().trim_start_matches('*').trim_start_matches('!'))
        .skip_while(|line| line.trim().is_empty())
        .collect();
    // Remove the trailing line corresponding to the `*/`.
    let trailing_blank = lines.last().map_or(false, |l| l.trim().is_empty());
    if trailing_blank {
        lines.pop();
    }
    lines.join("\n")
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn picks_up_single_and_multi_line_doc_comments() {
        assert_eq!(kind("/// hello"), Some(Kind::SingleLines));
        assert_eq!(kind("/** world */"), Some(Kind::MultiLine));
    }
    #[test]
    fn processes_single_lines_correctly() {
        assert_eq!(preprocess("///"), "");
        assert_eq!(preprocess("/// hello"), " hello");
        assert_eq!(preprocess("// hello"), " hello");
        // NOTE(review): this assertion duplicates the previous one; upstream
        // likely exercised a multi-space variant (`//    hello`) whose
        // whitespace appears to have been collapsed during vendoring --
        // confirm against upstream bindgen before changing.
        assert_eq!(preprocess("// hello"), " hello");
    }
    #[test]
    fn processes_multi_lines_correctly() {
        assert_eq!(preprocess("/**/"), "");
        assert_eq!(
            preprocess("/** hello \n * world \n * foo \n */"),
            " hello\n world\n foo"
        );
        assert_eq!(
            preprocess("/**\nhello\n*world\n*foo\n*/"),
            "hello\nworld\nfoo"
        );
    }
}

1890
third-party/vendor/bindgen/ir/comp.rs vendored Normal file

File diff suppressed because it is too large Load diff

2858
third-party/vendor/bindgen/ir/context.rs vendored Normal file

File diff suppressed because it is too large Load diff

135
third-party/vendor/bindgen/ir/derive.rs vendored Normal file
View file

@ -0,0 +1,135 @@
//! Traits for determining whether we can derive traits for a thing or not.
//!
//! These traits tend to come in pairs:
//!
//! 1. A "trivial" version, whose implementations aren't allowed to recursively
//! look at other types or the results of fix point analyses.
//!
//! 2. A "normal" version, whose implementations simply query the results of a
//! fix point analysis.
//!
//! The former is used by the analyses when creating the results queried by the
//! second.
use super::context::BindgenContext;
use std::cmp;
use std::ops;
/// A trait that encapsulates the logic for whether or not we can derive `Debug`
/// for a given thing.
///
/// See the module-level documentation for the "trivial" vs. "normal" split of
/// these traits.
pub trait CanDeriveDebug {
    /// Return `true` if `Debug` can be derived for this thing, `false`
    /// otherwise.
    fn can_derive_debug(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive `Copy`
/// for a given thing.
pub trait CanDeriveCopy {
/// Return `true` if `Copy` can be derived for this thing, `false`
/// otherwise.
fn can_derive_copy(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive
/// `Default` for a given thing.
pub trait CanDeriveDefault {
/// Return `true` if `Default` can be derived for this thing, `false`
/// otherwise.
fn can_derive_default(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive `Hash`
/// for a given thing.
pub trait CanDeriveHash {
/// Return `true` if `Hash` can be derived for this thing, `false`
/// otherwise.
fn can_derive_hash(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive
/// `PartialEq` for a given thing.
pub trait CanDerivePartialEq {
/// Return `true` if `PartialEq` can be derived for this thing, `false`
/// otherwise.
fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive
/// `PartialOrd` for a given thing.
pub trait CanDerivePartialOrd {
/// Return `true` if `PartialOrd` can be derived for this thing, `false`
/// otherwise.
fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive `Eq`
/// for a given thing.
pub trait CanDeriveEq {
/// Return `true` if `Eq` can be derived for this thing, `false` otherwise.
fn can_derive_eq(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive `Ord`
/// for a given thing.
pub trait CanDeriveOrd {
/// Return `true` if `Ord` can be derived for this thing, `false` otherwise.
fn can_derive_ord(&self, ctx: &BindgenContext) -> bool;
}
/// Whether it is possible or not to automatically derive trait for an item.
///
/// ```ignore
///       No
///       ^
///       |
///    Manually
///       ^
///       |
///      Yes
/// ```
///
/// Initially we assume that we can derive trait for all types and then
/// update our understanding as we learn more about each type.
// NOTE: the declaration order of the variants matters. The derived
// `PartialOrd`/`Ord` make `Yes < Manually < No`, which is exactly what
// `CanDerive::join` (via `cmp::max`) relies on to take the least upper bound.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum CanDerive {
    /// Yes, we can derive automatically.
    Yes,

    /// The only thing that stops us from automatically deriving is that
    /// array with more than maximum number of elements is used.
    ///
    /// This means we probably can "manually" implement such trait.
    Manually,

    /// No, we cannot.
    No,
}
impl Default for CanDerive {
fn default() -> CanDerive {
CanDerive::Yes
}
}
impl CanDerive {
/// Take the least upper bound of `self` and `rhs`.
pub fn join(self, rhs: Self) -> Self {
cmp::max(self, rhs)
}
}
impl ops::BitOr for CanDerive {
    type Output = Self;

    /// `a | b` is an alias for [`CanDerive::join`].
    fn bitor(self, rhs: Self) -> Self::Output {
        Self::join(self, rhs)
    }
}
impl ops::BitOrAssign for CanDerive {
    /// In-place version of [`CanDerive::join`].
    fn bitor_assign(&mut self, rhs: Self) {
        let joined = self.join(rhs);
        *self = joined;
    }
}

86
third-party/vendor/bindgen/ir/dot.rs vendored Normal file
View file

@ -0,0 +1,86 @@
//! Generating Graphviz `dot` files from our IR.
use super::context::{BindgenContext, ItemId};
use super::traversal::Trace;
use std::fs::File;
use std::io::{self, Write};
use std::path::Path;
/// A trait for anything that can write attributes as `<table>` rows to a dot
/// file.
pub trait DotAttributes {
    /// Write this thing's attributes to the given output. Each attribute must
    /// be its own `<tr>...</tr>`.
    ///
    /// Emitting nothing at all is also valid for things with no interesting
    /// attributes.
    fn dot_attributes<W>(
        &self,
        ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write;
}
/// Write a graphviz dot file containing our IR.
///
/// Each item becomes a node (gray when not allowlisted), each traversal edge
/// becomes a labeled edge, and module/child relationships get dotted edges.
pub fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
where
    P: AsRef<Path>,
{
    let file = File::create(path)?;
    let mut dot_file = io::BufWriter::new(file);
    writeln!(&mut dot_file, "digraph {{")?;

    // `trace` drives the edge callback, which cannot return an error, so we
    // stash the first write failure here and bail out right after the call.
    // (The original stored `Option<io::Result<_>>` that only ever held
    // `Some(Err(_))`; storing the error itself is simpler.)
    let mut err: Option<io::Error> = None;

    for (id, item) in ctx.items() {
        let is_allowlisted = ctx.allowlisted_items().contains(&id);

        writeln!(
            &mut dot_file,
            r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#,
            id.as_usize(),
            if is_allowlisted { "black" } else { "gray" }
        )?;
        item.dot_attributes(ctx, &mut dot_file)?;
        writeln!(&mut dot_file, r#"</table> >];"#)?;

        item.trace(
            ctx,
            &mut |sub_id: ItemId, edge_kind| {
                // Once a write has failed, skip the remaining edges.
                if err.is_some() {
                    return;
                }

                if let Err(e) = writeln!(
                    &mut dot_file,
                    "{} -> {} [label={:?}, color={}];",
                    id.as_usize(),
                    sub_id.as_usize(),
                    edge_kind,
                    if is_allowlisted { "black" } else { "gray" }
                ) {
                    err = Some(e);
                }
            },
            &(),
        );

        if let Some(e) = err {
            return Err(e);
        }

        // Modules additionally point at each of their children.
        if let Some(module) = item.as_module() {
            for child in module.children() {
                writeln!(
                    &mut dot_file,
                    "{} -> {} [style=dotted, color=gray]",
                    item.id().as_usize(),
                    child.as_usize()
                )?;
            }
        }
    }
    writeln!(&mut dot_file, "}}")?;
    Ok(())
}

320
third-party/vendor/bindgen/ir/enum_ty.rs vendored Normal file
View file

@ -0,0 +1,320 @@
//! Intermediate representation for C/C++ enumerations.
use super::super::codegen::EnumVariation;
use super::context::{BindgenContext, TypeId};
use super::item::Item;
use super::ty::{Type, TypeKind};
use crate::clang;
use crate::ir::annotations::Annotations;
use crate::parse::{ClangItemParser, ParseError};
use crate::regex_set::RegexSet;
/// An enum representing custom handling that can be given to a variant.
///
/// Set either through parse callbacks or through source annotations.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum EnumVariantCustomBehavior {
    /// This variant will be a module containing constants.
    ModuleConstify,
    /// This variant will be constified, that is, forced to generate a constant.
    Constify,
    /// This variant will be hidden entirely from the resulting enum.
    Hide,
}

/// A C/C++ enumeration.
#[derive(Debug)]
pub struct Enum {
    /// The representation used for this enum; it should be an `IntKind` type or
    /// an alias to one.
    ///
    /// It's `None` if the enum is a forward declaration and isn't defined
    /// anywhere else, see `tests/headers/func_ptr_in_struct.h`.
    repr: Option<TypeId>,

    /// The different variants, with explicit values.
    variants: Vec<EnumVariant>,
}
impl Enum {
    /// Construct a new `Enum` with the given representation and variants.
    pub fn new(repr: Option<TypeId>, variants: Vec<EnumVariant>) -> Self {
        Enum { repr, variants }
    }

    /// Get this enumeration's representation.
    pub fn repr(&self) -> Option<TypeId> {
        self.repr
    }

    /// Get this enumeration's variants.
    pub fn variants(&self) -> &[EnumVariant] {
        &self.variants
    }

    /// Construct an enumeration from the given Clang type.
    ///
    /// Walks the enum declaration's children, collecting one `EnumVariant`
    /// per `EnumConstantDecl`, applying any callback- or annotation-driven
    /// renames and custom behaviors along the way.
    pub fn from_ty(
        ty: &clang::Type,
        ctx: &mut BindgenContext,
    ) -> Result<Self, ParseError> {
        use clang_sys::*;
        debug!("Enum::from_ty {:?}", ty);

        if ty.kind() != CXType_Enum {
            return Err(ParseError::Continue);
        }

        let declaration = ty.declaration().canonical();
        // The underlying integer type; stays `None` for forward declarations.
        let repr = declaration
            .enum_type()
            .and_then(|et| Item::from_ty(&et, declaration, None, ctx).ok());
        let mut variants = vec![];

        let variant_ty =
            repr.and_then(|r| ctx.resolve_type(r).safe_canonical_type(ctx));
        let is_bool = variant_ty.map_or(false, Type::is_bool);

        // Assume signedness since the default type by the C standard is an int.
        let is_signed = variant_ty.map_or(true, |ty| match *ty.kind() {
            TypeKind::Int(ref int_kind) => int_kind.is_signed(),
            ref other => {
                panic!("Since when enums can be non-integers? {:?}", other)
            }
        });

        let type_name = ty.spelling();
        // Anonymous enums have an empty spelling; normalize to `None`.
        let type_name = if type_name.is_empty() {
            None
        } else {
            Some(type_name)
        };
        let type_name = type_name.as_deref();

        let definition = declaration.definition().unwrap_or(declaration);
        definition.visit(|cursor| {
            if cursor.kind() == CXCursor_EnumConstantDecl {
                // Read the constant's value with the matching representation.
                let value = if is_bool {
                    cursor.enum_val_boolean().map(EnumVariantValue::Boolean)
                } else if is_signed {
                    cursor.enum_val_signed().map(EnumVariantValue::Signed)
                } else {
                    cursor.enum_val_unsigned().map(EnumVariantValue::Unsigned)
                };
                if let Some(val) = value {
                    let name = cursor.spelling();
                    let annotations = Annotations::new(&cursor);
                    // Parse callbacks take precedence over source annotations.
                    let custom_behavior = ctx
                        .options()
                        .last_callback(|callbacks| {
                            callbacks
                                .enum_variant_behavior(type_name, &name, val)
                        })
                        .or_else(|| {
                            let annotations = annotations.as_ref()?;
                            if annotations.hide() {
                                Some(EnumVariantCustomBehavior::Hide)
                            } else if annotations.constify_enum_variant() {
                                Some(EnumVariantCustomBehavior::Constify)
                            } else {
                                None
                            }
                        });

                    // Same precedence for renames: callback first, then the
                    // last `use_instead_of` annotation, then the raw spelling.
                    let new_name = ctx
                        .options()
                        .last_callback(|callbacks| {
                            callbacks.enum_variant_name(type_name, &name, val)
                        })
                        .or_else(|| {
                            annotations
                                .as_ref()?
                                .use_instead_of()?
                                .last()
                                .cloned()
                        })
                        .unwrap_or_else(|| name.clone());

                    let comment = cursor.raw_comment();
                    variants.push(EnumVariant::new(
                        new_name,
                        name,
                        comment,
                        val,
                        custom_behavior,
                    ));
                }
            }
            CXChildVisit_Continue
        });
        Ok(Enum::new(repr, variants))
    }

    /// Whether this enum matches the given regex set, either by its path or,
    /// for anonymous enums, by any of its variant names.
    fn is_matching_enum(
        &self,
        ctx: &BindgenContext,
        enums: &RegexSet,
        item: &Item,
    ) -> bool {
        let path = item.path_for_allowlisting(ctx);
        let enum_ty = item.expect_type();

        // NOTE(review): `path[1..]` drops the first path segment (presumably
        // the root module) before matching — confirm against
        // `path_for_allowlisting`.
        if enums.matches(path[1..].join("::")) {
            return true;
        }

        // Test the variants if the enum is anonymous.
        if enum_ty.name().is_some() {
            return false;
        }

        self.variants().iter().any(|v| enums.matches(v.name()))
    }

    /// Returns the final representation of the enum.
    ///
    /// Checks each user-provided regex set in precedence order and falls back
    /// to the configured default style.
    pub fn computed_enum_variation(
        &self,
        ctx: &BindgenContext,
        item: &Item,
    ) -> EnumVariation {
        // ModuleConsts has higher precedence before Rust in order to avoid
        // problems with overlapping match patterns.
        if self.is_matching_enum(
            ctx,
            &ctx.options().constified_enum_modules,
            item,
        ) {
            EnumVariation::ModuleConsts
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().bitfield_enums,
            item,
        ) {
            EnumVariation::NewType {
                is_bitfield: true,
                is_global: false,
            }
        } else if self.is_matching_enum(ctx, &ctx.options().newtype_enums, item)
        {
            EnumVariation::NewType {
                is_bitfield: false,
                is_global: false,
            }
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().newtype_global_enums,
            item,
        ) {
            EnumVariation::NewType {
                is_bitfield: false,
                is_global: true,
            }
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().rustified_enums,
            item,
        ) {
            EnumVariation::Rust {
                non_exhaustive: false,
            }
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().rustified_non_exhaustive_enums,
            item,
        ) {
            EnumVariation::Rust {
                non_exhaustive: true,
            }
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().constified_enums,
            item,
        ) {
            EnumVariation::Consts
        } else {
            ctx.options().default_enum_style
        }
    }
}
/// A single enum variant, to be contained only in an enum.
#[derive(Debug)]
pub struct EnumVariant {
    /// The name of the variant.
    name: String,

    /// The original name of the variant (without user mangling), used when
    /// matching against allowlists.
    name_for_allowlisting: String,

    /// An optional doc comment.
    comment: Option<String>,

    /// The integer value of the variant.
    val: EnumVariantValue,

    /// The custom behavior this variant may have, if any.
    custom_behavior: Option<EnumVariantCustomBehavior>,
}

/// A constant value assigned to an enumeration variant.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum EnumVariantValue {
    /// A boolean constant.
    Boolean(bool),

    /// A signed constant.
    Signed(i64),

    /// An unsigned constant.
    Unsigned(u64),
}
impl EnumVariant {
    /// Construct a new enumeration variant from the given parts.
    pub fn new(
        name: String,
        name_for_allowlisting: String,
        comment: Option<String>,
        val: EnumVariantValue,
        custom_behavior: Option<EnumVariantCustomBehavior>,
    ) -> Self {
        Self {
            name,
            name_for_allowlisting,
            comment,
            val,
            custom_behavior,
        }
    }

    /// Get this variant's (possibly renamed) name.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Get the variant's original name, as used for allowlist matching.
    pub fn name_for_allowlisting(&self) -> &str {
        &self.name_for_allowlisting
    }

    /// Get this variant's value.
    pub fn val(&self) -> EnumVariantValue {
        self.val
    }

    /// Get this variant's documentation.
    pub fn comment(&self) -> Option<&str> {
        self.comment.as_deref()
    }

    /// Returns whether this variant should be enforced to be a constant by
    /// code generation.
    pub fn force_constification(&self) -> bool {
        matches!(
            self.custom_behavior,
            Some(EnumVariantCustomBehavior::Constify)
        )
    }

    /// Returns whether the current variant should be hidden completely from
    /// the resulting rust enum.
    pub fn hidden(&self) -> bool {
        matches!(self.custom_behavior, Some(EnumVariantCustomBehavior::Hide))
    }
}

View file

@ -0,0 +1,747 @@
//! Intermediate representation for C/C++ functions and methods.
use super::comp::MethodKind;
use super::context::{BindgenContext, TypeId};
use super::dot::DotAttributes;
use super::item::Item;
use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::TypeKind;
use crate::clang::{self, Attribute};
use crate::parse::{
ClangItemParser, ClangSubItemParser, ParseError, ParseResult,
};
use clang_sys::{self, CXCallingConv};
use proc_macro2;
use quote;
use quote::TokenStreamExt;
use std::io;
use std::str::FromStr;
/// Maximum number of function-pointer arguments for which Rust will still
/// derive traits; used by `FunctionSig::function_pointers_can_derive`.
const RUST_DERIVE_FUNPTR_LIMIT: usize = 12;

/// What kind of a function are we looking at?
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum FunctionKind {
    /// A plain, free function.
    Function,
    /// A method of some kind.
    Method(MethodKind),
}

impl FunctionKind {
    /// Given a clang cursor, return the kind of function it represents, or
    /// `None` otherwise.
    pub fn from_cursor(cursor: &clang::Cursor) -> Option<FunctionKind> {
        // FIXME(emilio): Deduplicate logic with `ir::comp`.
        Some(match cursor.kind() {
            clang_sys::CXCursor_FunctionDecl => FunctionKind::Function,
            clang_sys::CXCursor_Constructor => {
                FunctionKind::Method(MethodKind::Constructor)
            }
            clang_sys::CXCursor_Destructor => {
                // Distinguish virtual (possibly pure) destructors from
                // plain ones.
                FunctionKind::Method(if cursor.method_is_virtual() {
                    MethodKind::VirtualDestructor {
                        pure_virtual: cursor.method_is_pure_virtual(),
                    }
                } else {
                    MethodKind::Destructor
                })
            }
            clang_sys::CXCursor_CXXMethod => {
                if cursor.method_is_virtual() {
                    FunctionKind::Method(MethodKind::Virtual {
                        pure_virtual: cursor.method_is_pure_virtual(),
                    })
                } else if cursor.method_is_static() {
                    FunctionKind::Method(MethodKind::Static)
                } else {
                    FunctionKind::Method(MethodKind::Normal)
                }
            }
            _ => return None,
        })
    }
}
/// The style of linkage
#[derive(Debug, Clone, Copy)]
pub enum Linkage {
    /// Externally visible and can be linked against
    External,
    /// Not exposed externally. 'static inline' functions will have this kind of linkage
    Internal,
}

/// A function declaration, with a signature, arguments, and argument names.
///
/// The argument names vector must be the same length as the ones in the
/// signature.
#[derive(Debug)]
pub struct Function {
    /// The name of this function.
    name: String,

    /// The mangled name, that is, the symbol.
    mangled_name: Option<String>,

    /// The id pointing to the current function signature.
    signature: TypeId,

    /// The doc comment on the function, if any.
    comment: Option<String>,

    /// The kind of function this is.
    kind: FunctionKind,

    /// The linkage of the function.
    linkage: Linkage,
}
impl Function {
    /// Construct a new function.
    pub fn new(
        name: String,
        mangled_name: Option<String>,
        signature: TypeId,
        comment: Option<String>,
        kind: FunctionKind,
        linkage: Linkage,
    ) -> Self {
        Function {
            name,
            mangled_name,
            signature,
            comment,
            kind,
            linkage,
        }
    }

    /// Get this function's name.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Get this function's mangled name (the linker symbol), if any.
    pub fn mangled_name(&self) -> Option<&str> {
        self.mangled_name.as_deref()
    }

    /// Get this function's signature type.
    pub fn signature(&self) -> TypeId {
        self.signature
    }

    /// Get this function's comment.
    pub fn comment(&self) -> Option<&str> {
        self.comment.as_deref()
    }

    /// Get this function's kind.
    pub fn kind(&self) -> FunctionKind {
        self.kind
    }

    /// Get this function's linkage.
    pub fn linkage(&self) -> Linkage {
        self.linkage
    }
}
impl DotAttributes for Function {
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        // The mangled name is the only extra attribute worth a row; escape it
        // with `escape_default` so it is safe inside the dot label.
        if let Some(mangled) = self.mangled_name.as_ref() {
            let escaped: String =
                mangled.chars().flat_map(char::escape_default).collect();
            writeln!(
                out,
                "<tr><td>mangled name</td><td>{}</td></tr>",
                escaped
            )?;
        }
        Ok(())
    }
}
/// A valid rust ABI.
///
/// Parsed from the user-facing spelling via `FromStr`, and turned back into
/// the same spelling via `Display`.
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
pub enum Abi {
    /// The default C ABI.
    C,
    /// The "stdcall" ABI.
    Stdcall,
    /// The "fastcall" ABI.
    Fastcall,
    /// The "thiscall" ABI.
    ThisCall,
    /// The "vectorcall" ABI.
    Vectorcall,
    /// The "aapcs" ABI.
    Aapcs,
    /// The "win64" ABI.
    Win64,
    /// The "C-unwind" ABI.
    CUnwind,
}
impl FromStr for Abi {
    type Err = String;

    /// Parse an ABI from its user-facing spelling.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "C" => Self::C,
            "stdcall" => Self::Stdcall,
            "fastcall" => Self::Fastcall,
            "thiscall" => Self::ThisCall,
            "vectorcall" => Self::Vectorcall,
            "aapcs" => Self::Aapcs,
            "win64" => Self::Win64,
            "C-unwind" => Self::CUnwind,
            _ => return Err(format!("Invalid or unknown ABI {:?}", s)),
        })
    }
}
impl std::fmt::Display for Abi {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegate to `str::fmt` (rather than `write_str`) so that any
        // width/alignment flags on the formatter are still honored.
        let name = match self {
            Self::C => "C",
            Self::Stdcall => "stdcall",
            Self::Fastcall => "fastcall",
            Self::ThisCall => "thiscall",
            Self::Vectorcall => "vectorcall",
            Self::Aapcs => "aapcs",
            Self::Win64 => "win64",
            Self::CUnwind => "C-unwind",
        };
        name.fmt(f)
    }
}
impl quote::ToTokens for Abi {
    fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
        // Interpolate the ABI's display name as a string literal token.
        let abi = self.to_string();
        tokens.append_all(quote! { #abi });
    }
}
/// An ABI extracted from a clang cursor.
#[derive(Debug, Copy, Clone)]
pub(crate) enum ClangAbi {
    /// An ABI we have a Rust spelling for.
    Known(Abi),
    /// An unknown or invalid ABI.
    Unknown(CXCallingConv),
}

impl ClangAbi {
    /// Returns whether this Abi is known or not.
    fn is_unknown(&self) -> bool {
        matches!(*self, ClangAbi::Unknown(..))
    }
}

impl quote::ToTokens for ClangAbi {
    /// Emit the ABI tokens for codegen.
    ///
    /// Panics on `Unknown` calling conventions, which have no Rust spelling.
    fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
        match *self {
            Self::Known(abi) => abi.to_tokens(tokens),
            Self::Unknown(cc) => panic!(
                "Cannot turn unknown calling convention to tokens: {:?}",
                cc
            ),
        }
    }
}
/// A function signature.
#[derive(Debug)]
pub struct FunctionSig {
    /// The return type of the function.
    return_type: TypeId,

    /// The type of the arguments, optionally with the name of the argument when
    /// declared.
    argument_types: Vec<(Option<String>, TypeId)>,

    /// Whether this function is variadic.
    is_variadic: bool,

    /// Whether this function diverges (it carries a `noreturn` attribute in
    /// some form) and thus never returns to the caller.
    is_divergent: bool,

    /// Whether this function's return value must be used.
    must_use: bool,

    /// The ABI of this function.
    abi: ClangAbi,
}
/// Map a libclang calling convention onto an ABI we can name in Rust, or keep
/// the raw value when we have no equivalent for it.
fn get_abi(cc: CXCallingConv) -> ClangAbi {
    use clang_sys::*;
    match cc {
        // Both the default and explicit C conventions map to `extern "C"`.
        CXCallingConv_Default => ClangAbi::Known(Abi::C),
        CXCallingConv_C => ClangAbi::Known(Abi::C),
        CXCallingConv_X86StdCall => ClangAbi::Known(Abi::Stdcall),
        CXCallingConv_X86FastCall => ClangAbi::Known(Abi::Fastcall),
        CXCallingConv_X86ThisCall => ClangAbi::Known(Abi::ThisCall),
        CXCallingConv_X86VectorCall => ClangAbi::Known(Abi::Vectorcall),
        CXCallingConv_AAPCS => ClangAbi::Known(Abi::Aapcs),
        CXCallingConv_X86_64Win64 => ClangAbi::Known(Abi::Win64),
        other => ClangAbi::Unknown(other),
    }
}
/// Get the mangled name for the cursor's referent.
///
/// Returns `None` when mangling is disabled, when the cursor lives inside a
/// partially specialized template, or when libclang reports no mangling.
pub fn cursor_mangling(
    ctx: &BindgenContext,
    cursor: &clang::Cursor,
) -> Option<String> {
    if !ctx.options().enable_mangling {
        return None;
    }

    // We early return here because libclang may crash in some case
    // if we pass in a variable inside a partial specialized template.
    // See rust-lang/rust-bindgen#67, and rust-lang/rust-bindgen#462.
    if cursor.is_in_non_fully_specialized_template() {
        return None;
    }

    let is_destructor = cursor.kind() == clang_sys::CXCursor_Destructor;
    // Prefer the list of C++ manglings when libclang provides one, taking
    // entries from the back.
    if let Ok(mut manglings) = cursor.cxx_manglings() {
        while let Some(m) = manglings.pop() {
            // Only generate the destructor group 1, see below.
            if is_destructor && !m.ends_with("D1Ev") {
                continue;
            }
            return Some(m);
        }
    }

    let mut mangling = cursor.mangling();
    if mangling.is_empty() {
        return None;
    }

    if is_destructor {
        // With old (3.8-) libclang versions, and the Itanium ABI, clang returns
        // the "destructor group 0" symbol, which means that it'll try to free
        // memory, which definitely isn't what we want.
        //
        // Explicitly force the destructor group 1 symbol.
        //
        // See http://refspecs.linuxbase.org/cxxabi-1.83.html#mangling-special
        // for the reference, and http://stackoverflow.com/a/6614369/1091587 for
        // a more friendly explanation.
        //
        // We don't need to do this for constructors since clang seems to always
        // have returned the C1 constructor.
        //
        // FIXME(emilio): Can a legit symbol in other ABIs end with this string?
        // I don't think so, but if it can this would become a linker error
        // anyway, not an invalid free at runtime.
        //
        // TODO(emilio, #611): Use cpp_demangle if this becomes nastier with
        // time.
        if mangling.ends_with("D0Ev") {
            let new_len = mangling.len() - 4;
            mangling.truncate(new_len);
            mangling.push_str("D1Ev");
        }
    }

    Some(mangling)
}
/// Pair up `(name, type)` for each argument, combining information from the
/// function type and its cursor.
fn args_from_ty_and_cursor(
    ty: &clang::Type,
    cursor: &clang::Cursor,
    ctx: &mut BindgenContext,
) -> Vec<(Option<String>, TypeId)> {
    let cursor_args = cursor.args().unwrap_or_default().into_iter();
    let type_args = ty.args().unwrap_or_default().into_iter();

    // Argument types can be found in either the cursor or the type, but argument names may only be
    // found on the cursor. We often have access to both a type and a cursor for each argument, but
    // in some cases we may only have one.
    //
    // Prefer using the type as the source of truth for the argument's type, but fall back to
    // inspecting the cursor (this happens for Objective C interfaces).
    //
    // Prefer using the cursor for the argument's type, but fall back to using the parent's cursor
    // (this happens for function pointer return types).
    cursor_args
        .map(Some)
        .chain(std::iter::repeat(None))
        .zip(type_args.map(Some).chain(std::iter::repeat(None)))
        .take_while(|(cur, ty)| cur.is_some() || ty.is_some())
        .map(|(arg_cur, arg_ty)| {
            // An empty spelling means the parameter is unnamed.
            let name = arg_cur.map(|a| a.spelling()).and_then(|name| {
                if name.is_empty() {
                    None
                } else {
                    Some(name)
                }
            });

            let cursor = arg_cur.unwrap_or(*cursor);
            let ty = arg_ty.unwrap_or_else(|| cursor.cur_type());
            (name, Item::from_ty_or_ref(ty, cursor, None, ctx))
        })
        .collect()
}
impl FunctionSig {
    /// Construct a new function signature from the given Clang type.
    ///
    /// Returns `Err(ParseError::Continue)` for things we deliberately skip:
    /// function templates, C++ operators, and out-of-line member definitions.
    pub fn from_ty(
        ty: &clang::Type,
        cursor: &clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Result<Self, ParseError> {
        use clang_sys::*;
        debug!("FunctionSig::from_ty {:?} {:?}", ty, cursor);

        // Skip function templates
        let kind = cursor.kind();
        if kind == CXCursor_FunctionTemplate {
            return Err(ParseError::Continue);
        }

        let spelling = cursor.spelling();

        // Don't parse operatorxx functions in C++
        let is_operator = |spelling: &str| {
            spelling.starts_with("operator") &&
                !clang::is_valid_identifier(spelling)
        };
        if is_operator(&spelling) {
            return Err(ParseError::Continue);
        }

        // Constructors of non-type template parameter classes for some reason
        // include the template parameter in their name. Just skip them, since
        // we don't handle well non-type template parameters anyway.
        if (kind == CXCursor_Constructor || kind == CXCursor_Destructor) &&
            spelling.contains('<')
        {
            return Err(ParseError::Continue);
        }

        // Fall back to the type's declaration cursor if ours is invalid.
        let cursor = if cursor.is_valid() {
            *cursor
        } else {
            ty.declaration()
        };

        let mut args = match kind {
            CXCursor_FunctionDecl |
            CXCursor_Constructor |
            CXCursor_CXXMethod |
            CXCursor_ObjCInstanceMethodDecl |
            CXCursor_ObjCClassMethodDecl => {
                args_from_ty_and_cursor(ty, &cursor, ctx)
            }
            _ => {
                // For non-CXCursor_FunctionDecl, visiting the cursor's children
                // is the only reliable way to get parameter names.
                let mut args = vec![];
                cursor.visit(|c| {
                    if c.kind() == CXCursor_ParmDecl {
                        let ty =
                            Item::from_ty_or_ref(c.cur_type(), c, None, ctx);
                        let name = c.spelling();
                        let name =
                            if name.is_empty() { None } else { Some(name) };
                        args.push((name, ty));
                    }
                    CXChildVisit_Continue
                });
                if args.is_empty() {
                    // FIXME(emilio): Sometimes libclang doesn't expose the
                    // right AST for functions tagged as stdcall and such...
                    //
                    // https://bugs.llvm.org/show_bug.cgi?id=45919
                    args_from_ty_and_cursor(ty, &cursor, ctx)
                } else {
                    args
                }
            }
        };

        // Attribute detection is optional because it requires extra cursor
        // walks; both flags default to `false` otherwise.
        let (must_use, mut is_divergent) =
            if ctx.options().enable_function_attribute_detection {
                let [must_use, no_return, no_return_cpp] = cursor.has_attrs(&[
                    Attribute::MUST_USE,
                    Attribute::NO_RETURN,
                    Attribute::NO_RETURN_CPP,
                ]);
                (must_use, no_return || no_return_cpp)
            } else {
                Default::default()
            };

        // This looks easy to break but the clang parser keeps the type spelling clean even if
        // other attributes are added.
        is_divergent =
            is_divergent || ty.spelling().contains("__attribute__((noreturn))");

        let is_method = kind == CXCursor_CXXMethod;
        let is_constructor = kind == CXCursor_Constructor;
        let is_destructor = kind == CXCursor_Destructor;
        if (is_constructor || is_destructor || is_method) &&
            cursor.lexical_parent() != cursor.semantic_parent()
        {
            // Only parse constructors once.
            return Err(ParseError::Continue);
        }

        // Non-static members get an explicit `this` pointer prepended to the
        // argument list.
        if is_method || is_constructor || is_destructor {
            let is_const = is_method && cursor.method_is_const();
            let is_virtual = is_method && cursor.method_is_virtual();
            let is_static = is_method && cursor.method_is_static();
            if !is_static && !is_virtual {
                let parent = cursor.semantic_parent();
                let class = Item::parse(parent, None, ctx)
                    .expect("Expected to parse the class");
                // The `class` most likely is not finished parsing yet, so use
                // the unchecked variant.
                let class = class.as_type_id_unchecked();

                let class = if is_const {
                    let const_class_id = ctx.next_item_id();
                    ctx.build_const_wrapper(
                        const_class_id,
                        class,
                        None,
                        &parent.cur_type(),
                    )
                } else {
                    class
                };

                let ptr =
                    Item::builtin_type(TypeKind::Pointer(class), false, ctx);
                args.insert(0, (Some("this".into()), ptr));
            } else if is_virtual {
                // Virtual methods get an opaque `*void` receiver instead.
                let void = Item::builtin_type(TypeKind::Void, false, ctx);
                let ptr =
                    Item::builtin_type(TypeKind::Pointer(void), false, ctx);
                args.insert(0, (Some("this".into()), ptr));
            }
        }

        let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl ||
            kind == CXCursor_ObjCClassMethodDecl
        {
            ty.ret_type()
                .or_else(|| cursor.ret_type())
                .ok_or(ParseError::Continue)?
        } else {
            ty.ret_type().ok_or(ParseError::Continue)?
        };

        let ret = if is_constructor && ctx.is_target_wasm32() {
            // Constructors in Clang wasm32 target return a pointer to the object
            // being constructed.
            let void = Item::builtin_type(TypeKind::Void, false, ctx);
            Item::builtin_type(TypeKind::Pointer(void), false, ctx)
        } else {
            Item::from_ty_or_ref(ty_ret_type, cursor, None, ctx)
        };

        // Clang plays with us at "find the calling convention", see #549 and
        // co. This seems to be a better fix than that commit.
        let mut call_conv = ty.call_conv();
        if let Some(ty) = cursor.cur_type().canonical_type().pointee_type() {
            let cursor_call_conv = ty.call_conv();
            if cursor_call_conv != CXCallingConv_Invalid {
                call_conv = cursor_call_conv;
            }
        }

        let abi = get_abi(call_conv);

        if abi.is_unknown() {
            warn!("Unknown calling convention: {:?}", call_conv);
        }

        Ok(FunctionSig {
            return_type: ret,
            argument_types: args,
            is_variadic: ty.is_variadic(),
            is_divergent,
            must_use,
            abi,
        })
    }

    /// Get this function signature's return type.
    pub fn return_type(&self) -> TypeId {
        self.return_type
    }

    /// Get this function signature's argument (name, type) pairs.
    pub fn argument_types(&self) -> &[(Option<String>, TypeId)] {
        &self.argument_types
    }

    /// Get this function signature's ABI, honoring any user ABI override
    /// whose regex set matches `name`.
    pub(crate) fn abi(
        &self,
        ctx: &BindgenContext,
        name: Option<&str>,
    ) -> ClangAbi {
        // FIXME (pvdrz): Try to do this check lazily instead. Maybe store the ABI inside `ctx`
        // instead?.
        if let Some(name) = name {
            if let Some((abi, _)) = ctx
                .options()
                .abi_overrides
                .iter()
                .find(|(_, regex_set)| regex_set.matches(name))
            {
                ClangAbi::Known(*abi)
            } else {
                self.abi
            }
        } else {
            self.abi
        }
    }

    /// Is this function signature variadic?
    pub fn is_variadic(&self) -> bool {
        // Clang reports some functions as variadic when they *might* be
        // variadic. We do the argument check because rust doesn't codegen well
        // variadic functions without an initial argument.
        self.is_variadic && !self.argument_types.is_empty()
    }

    /// Must this function's return value be used?
    pub fn must_use(&self) -> bool {
        self.must_use
    }

    /// Are function pointers with this signature able to derive Rust traits?
    /// Rust only supports deriving traits for function pointers with a limited
    /// number of parameters and a couple ABIs.
    ///
    /// For more details, see:
    ///
    /// * https://github.com/rust-lang/rust-bindgen/issues/547,
    /// * https://github.com/rust-lang/rust/issues/38848,
    /// * and https://github.com/rust-lang/rust/issues/40158
    pub fn function_pointers_can_derive(&self) -> bool {
        if self.argument_types.len() > RUST_DERIVE_FUNPTR_LIMIT {
            return false;
        }

        matches!(self.abi, ClangAbi::Known(Abi::C) | ClangAbi::Unknown(..))
    }

    /// Whether this signature diverges (carries a `noreturn` attribute).
    pub(crate) fn is_divergent(&self) -> bool {
        self.is_divergent
    }
}
impl ClangSubItemParser for Function {
    /// Parse a function (or method) declaration from a clang cursor,
    /// skipping anything that isn't a default-visibility, non-private,
    /// externally- or internally-linked function.
    fn parse(
        cursor: clang::Cursor,
        context: &mut BindgenContext,
    ) -> Result<ParseResult<Self>, ParseError> {
        use clang_sys::*;

        let kind = match FunctionKind::from_cursor(&cursor) {
            None => return Err(ParseError::Continue),
            Some(k) => k,
        };

        debug!("Function::parse({:?}, {:?})", cursor, cursor.cur_type());
        let visibility = cursor.visibility();
        if visibility != CXVisibility_Default {
            return Err(ParseError::Continue);
        }

        if cursor.access_specifier() == CX_CXXPrivate {
            return Err(ParseError::Continue);
        }

        // Inline functions (declared inline here, or defined inline
        // elsewhere) are only parsed when the user asked for them, and
        // deleted ones never are.
        if cursor.is_inlined_function() ||
            cursor
                .definition()
                .map_or(false, |x| x.is_inlined_function())
        {
            if !context.options().generate_inline_functions {
                return Err(ParseError::Continue);
            }
            if cursor.is_deleted_function() {
                return Err(ParseError::Continue);
            }
        }

        let linkage = cursor.linkage();
        let linkage = match linkage {
            CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External,
            CXLinkage_Internal => Linkage::Internal,
            _ => return Err(ParseError::Continue),
        };

        // Grab the signature using Item::from_ty.
        let sig = Item::from_ty(&cursor.cur_type(), cursor, None, context)?;

        let mut name = cursor.spelling();
        assert!(!name.is_empty(), "Empty function name?");

        if cursor.kind() == CXCursor_Destructor {
            // Remove the leading `~`. The alternative to this is special-casing
            // code-generation for destructor functions, which seems less than
            // ideal.
            if name.starts_with('~') {
                name.remove(0);
            }

            // Add a suffix to avoid colliding with constructors. This would be
            // technically fine (since we handle duplicated functions/methods),
            // but seems easy enough to handle it here.
            name.push_str("_destructor");
        }
        // Let user callbacks override the generated name.
        if let Some(nm) = context
            .options()
            .last_callback(|callbacks| callbacks.generated_name_override(&name))
        {
            name = nm;
        }
        assert!(!name.is_empty(), "Empty function name.");

        let mangled_name = cursor_mangling(context, &cursor);
        let comment = cursor.raw_comment();

        let function =
            Self::new(name, mangled_name, sig, comment, kind, linkage);
        Ok(ParseResult::New(function, Some(cursor)))
    }
}
impl Trace for FunctionSig {
    type Extra = ();

    fn trace<T>(&self, _: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        // The return type and each argument type are edges in the IR graph.
        tracer.visit_kind(self.return_type().into(), EdgeKind::FunctionReturn);

        for &(_, arg_ty) in self.argument_types() {
            tracer
                .visit_kind(arg_ty.into(), EdgeKind::FunctionParameter);
        }
    }
}

127
third-party/vendor/bindgen/ir/int.rs vendored Normal file
View file

@ -0,0 +1,127 @@
//! Intermediate representation for integral types.
/// Which integral type are we dealing with?
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum IntKind {
    /// A `bool`.
    Bool,

    /// A `signed char`.
    SChar,

    /// An `unsigned char`.
    UChar,

    /// A `wchar_t`.
    WChar,

    /// A platform-dependent `char` type, with the signedness support.
    Char {
        /// Whether the char is signed for the target platform.
        is_signed: bool,
    },

    /// A `short`.
    Short,

    /// An `unsigned short`.
    UShort,

    /// An `int`.
    Int,

    /// An `unsigned int`.
    UInt,

    /// A `long`.
    Long,

    /// An `unsigned long`.
    ULong,

    /// A `long long`.
    LongLong,

    /// An `unsigned long long`.
    ULongLong,

    /// A 8-bit signed integer.
    I8,

    /// A 8-bit unsigned integer.
    U8,

    /// A 16-bit signed integer.
    I16,

    /// Either a `char16_t` or a `wchar_t`.
    U16,

    /// A 32-bit signed integer.
    I32,

    /// A 32-bit unsigned integer.
    U32,

    /// A 64-bit signed integer.
    I64,

    /// A 64-bit unsigned integer.
    U64,

    /// An `int128_t`.
    I128,

    /// A `uint128_t`.
    U128,

    /// A custom integer type, used to allow custom macro types depending on
    /// range.
    Custom {
        /// The name of the type, which would be used without modification.
        name: &'static str,

        /// Whether the type is signed or not.
        is_signed: bool,
    },
}
impl IntKind {
/// Is this integral type signed?
pub fn is_signed(&self) -> bool {
use self::IntKind::*;
match *self {
// TODO(emilio): wchar_t can in theory be signed, but we have no way
// to know whether it is or not right now (unlike char, there's no
// WChar_S / WChar_U).
Bool | UChar | UShort | UInt | ULong | ULongLong | U8 | U16 |
WChar | U32 | U64 | U128 => false,
SChar | Short | Int | Long | LongLong | I8 | I16 | I32 | I64 |
I128 => true,
Char { is_signed } => is_signed,
Custom { is_signed, .. } => is_signed,
}
}
/// If this type has a known size, return it (in bytes). This is to
/// alleviate libclang sometimes not giving us a layout (like in the case
/// when an enum is defined inside a class with template parameters).
pub fn known_size(&self) -> Option<usize> {
use self::IntKind::*;
Some(match *self {
Bool | UChar | SChar | U8 | I8 | Char { .. } => 1,
U16 | I16 => 2,
U32 | I32 => 4,
U64 | I64 => 8,
I128 | U128 => 16,
_ => return None,
})
}
/// Whether this type's signedness matches the value.
pub fn signedness_matches(&self, val: i64) -> bool {
val >= 0 || self.is_signed()
}
}

2017
third-party/vendor/bindgen/ir/item.rs vendored Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,147 @@
//! Different variants of an `Item` in our intermediate representation.
use super::context::BindgenContext;
use super::dot::DotAttributes;
use super::function::Function;
use super::module::Module;
use super::ty::Type;
use super::var::Var;
use std::io;
/// An item we parse and translate.
#[derive(Debug)]
pub enum ItemKind {
    /// A module, created implicitly once (the root module), or via C++
    /// namespaces.
    Module(Module),
    /// A type declared in any of the multiple ways it can be declared.
    Type(Type),
    /// A function or method declaration.
    Function(Function),
    /// A variable declaration, most likely a static.
    Var(Var),
}
impl ItemKind {
    /// Get a reference to this `ItemKind`'s underlying `Module`, or `None` if
    /// it is some other kind.
    pub fn as_module(&self) -> Option<&Module> {
        if let ItemKind::Module(ref module) = *self {
            Some(module)
        } else {
            None
        }
    }

    /// Transform our `ItemKind` into a string.
    pub fn kind_name(&self) -> &'static str {
        match *self {
            ItemKind::Module(..) => "Module",
            ItemKind::Type(..) => "Type",
            ItemKind::Function(..) => "Function",
            ItemKind::Var(..) => "Var",
        }
    }

    /// Is this a module?
    pub fn is_module(&self) -> bool {
        matches!(*self, ItemKind::Module(..))
    }

    /// Get a reference to this `ItemKind`'s underlying `Module`, or panic if
    /// it is some other kind.
    pub fn expect_module(&self) -> &Module {
        self.as_module().expect("Not a module")
    }

    /// Get a reference to this `ItemKind`'s underlying `Function`, or `None`
    /// if it is some other kind.
    pub fn as_function(&self) -> Option<&Function> {
        if let ItemKind::Function(ref func) = *self {
            Some(func)
        } else {
            None
        }
    }

    /// Is this a function?
    pub fn is_function(&self) -> bool {
        matches!(*self, ItemKind::Function(..))
    }

    /// Get a reference to this `ItemKind`'s underlying `Function`, or panic
    /// if it is some other kind.
    pub fn expect_function(&self) -> &Function {
        self.as_function().expect("Not a function")
    }

    /// Get a reference to this `ItemKind`'s underlying `Type`, or `None` if
    /// it is some other kind.
    pub fn as_type(&self) -> Option<&Type> {
        if let ItemKind::Type(ref ty) = *self {
            Some(ty)
        } else {
            None
        }
    }

    /// Get a mutable reference to this `ItemKind`'s underlying `Type`, or
    /// `None` if it is some other kind.
    pub fn as_type_mut(&mut self) -> Option<&mut Type> {
        if let ItemKind::Type(ref mut ty) = *self {
            Some(ty)
        } else {
            None
        }
    }

    /// Is this a type?
    pub fn is_type(&self) -> bool {
        matches!(*self, ItemKind::Type(..))
    }

    /// Get a reference to this `ItemKind`'s underlying `Type`, or panic if it
    /// is some other kind.
    pub fn expect_type(&self) -> &Type {
        self.as_type().expect("Not a type")
    }

    /// Get a reference to this `ItemKind`'s underlying `Var`, or `None` if it
    /// is some other kind.
    pub fn as_var(&self) -> Option<&Var> {
        if let ItemKind::Var(ref v) = *self {
            Some(v)
        } else {
            None
        }
    }

    /// Is this a variable?
    pub fn is_var(&self) -> bool {
        matches!(*self, ItemKind::Var(..))
    }

    /// Get a reference to this `ItemKind`'s underlying `Var`, or panic if it
    /// is some other kind.
    pub fn expect_var(&self) -> &Var {
        self.as_var().expect("Not a var")
    }
}
impl DotAttributes for ItemKind {
    /// Emit this item's kind as a graphviz table row, then delegate to the
    /// inner value's own dot attributes.
    fn dot_attributes<W>(
        &self,
        ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(out, "<tr><td>kind</td><td>{}</td></tr>", self.kind_name())?;
        match *self {
            ItemKind::Module(ref module) => module.dot_attributes(ctx, out),
            ItemKind::Type(ref ty) => ty.dot_attributes(ctx, out),
            ItemKind::Function(ref func) => func.dot_attributes(ctx, out),
            ItemKind::Var(ref var) => var.dot_attributes(ctx, out),
        }
    }
}

143
third-party/vendor/bindgen/ir/layout.rs vendored Normal file
View file

@ -0,0 +1,143 @@
//! Intermediate representation for the physical layout of some type.
use super::derive::CanDerive;
use super::ty::{Type, TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
use crate::clang;
use crate::ir::context::BindgenContext;
use std::cmp;
/// A type that represents the struct layout of a type.
///
/// A `size` and `align` of zero denotes the "zero layout" produced by
/// `Layout::zero` (see `is_zero`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Layout {
    /// The size (in bytes) of this layout.
    pub size: usize,
    /// The alignment (in bytes) of this layout.
    pub align: usize,
    /// Whether this layout's members are packed or not.
    pub packed: bool,
}
#[test]
fn test_layout_for_size() {
    let ptr_size = std::mem::size_of::<*mut ()>();
    // A pointer-sized blob gets pointer alignment...
    assert_eq!(
        Layout::for_size_internal(ptr_size, ptr_size),
        Layout::new(ptr_size, ptr_size)
    );
    // ...and so does any larger multiple of the pointer size.
    assert_eq!(
        Layout::for_size_internal(ptr_size, 3 * ptr_size),
        Layout::new(3 * ptr_size, ptr_size)
    );
}
impl Layout {
    /// Gets the integer type name for a given known size.
    pub fn known_type_for_size(
        ctx: &BindgenContext,
        size: usize,
    ) -> Option<&'static str> {
        // `u128` is only usable when the target Rust version supports it.
        match size {
            16 if ctx.options().rust_features.i128_and_u128 => Some("u128"),
            8 => Some("u64"),
            4 => Some("u32"),
            2 => Some("u16"),
            1 => Some("u8"),
            _ => None,
        }
    }

    /// Construct a new `Layout` with the given `size` and `align`. It is not
    /// packed.
    pub fn new(size: usize, align: usize) -> Self {
        Self {
            size,
            align,
            packed: false,
        }
    }

    /// Pick the largest power-of-two alignment, capped at `ptr_size`, that
    /// evenly divides `size`.
    fn for_size_internal(ptr_size: usize, size: usize) -> Self {
        let mut align = 1;
        while size % (align * 2) == 0 && align * 2 <= ptr_size {
            align *= 2;
        }
        Self {
            size,
            align,
            packed: false,
        }
    }

    /// Creates a non-packed layout for a given size, trying to use the maximum
    /// alignment possible.
    pub fn for_size(ctx: &BindgenContext, size: usize) -> Self {
        Self::for_size_internal(ctx.target_pointer_size(), size)
    }

    /// Is this a zero-sized layout?
    pub fn is_zero(&self) -> bool {
        self.size == 0 && self.align == 0
    }

    /// Construct a zero-sized layout.
    pub fn zero() -> Self {
        Self::new(0, 0)
    }

    /// Get this layout as an opaque type.
    pub fn opaque(&self) -> Opaque {
        Opaque(*self)
    }
}
/// When we are treating a type as opaque, it is just a blob with a `Layout`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Opaque(pub Layout);

impl Opaque {
    /// Construct a new opaque type from the given clang type.
    pub fn from_clang_ty(ty: &clang::Type, ctx: &BindgenContext) -> Type {
        let layout = Layout::new(ty.size(ctx), ty.align(ctx));
        Type::new(None, Some(layout), TypeKind::Opaque, ty.is_const())
    }

    /// Return the known rust type we should use to create a correctly-aligned
    /// field with this layout.
    pub fn known_rust_type_for_array(
        &self,
        ctx: &BindgenContext,
    ) -> Option<&'static str> {
        Layout::known_type_for_size(ctx, self.0.align)
    }

    /// Return the array size that an opaque type for this layout should have
    /// if we know the correct type for it, or `None` otherwise.
    pub fn array_size(&self, ctx: &BindgenContext) -> Option<usize> {
        // Guard `align` against zero so the division is always defined.
        self.known_rust_type_for_array(ctx)
            .map(|_| self.0.size / cmp::max(self.0.align, 1))
    }

    /// Return `true` if this opaque layout's array size will fit within the
    /// maximum number of array elements that Rust allows deriving traits
    /// with. Return `false` otherwise.
    pub fn array_size_within_derive_limit(
        &self,
        ctx: &BindgenContext,
    ) -> CanDerive {
        match self.array_size(ctx) {
            Some(size) if size <= RUST_DERIVE_IN_ARRAY_LIMIT => CanDerive::Yes,
            _ => CanDerive::Manually,
        }
    }
}

24
third-party/vendor/bindgen/ir/mod.rs vendored Normal file
View file

@ -0,0 +1,24 @@
//! The ir module defines bindgen's intermediate representation.
//!
//! Parsing C/C++ generates the IR, while code generation outputs Rust code from
//! the IR.
pub mod analysis;
pub mod annotations;
pub mod comment;
pub mod comp;
pub mod context;
pub mod derive;
pub mod dot;
pub mod enum_ty;
pub mod function;
pub mod int;
pub mod item;
pub mod item_kind;
pub mod layout;
pub mod module;
pub mod objc;
pub mod template;
pub mod traversal;
pub mod ty;
pub mod var;

95
third-party/vendor/bindgen/ir/module.rs vendored Normal file
View file

@ -0,0 +1,95 @@
//! Intermediate representation for modules (AKA C++ namespaces).
use super::context::BindgenContext;
use super::dot::DotAttributes;
use super::item::ItemSet;
use crate::clang;
use crate::parse::{ClangSubItemParser, ParseError, ParseResult};
use crate::parse_one;
use std::io;
/// Whether this module is inline or not.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ModuleKind {
    /// This module is not inline.
    Normal,
    /// This module is inline, as in `inline namespace foo {}`.
    Inline,
}

/// A module, as in, a C++ namespace.
#[derive(Clone, Debug)]
pub struct Module {
    /// The name of the module, or `None` if it's an anonymous namespace.
    name: Option<String>,
    /// The kind of module this is.
    kind: ModuleKind,
    /// The children of this module, just here for convenience.
    children: ItemSet,
}
impl Module {
    /// Construct a new `Module` with the given name and kind, and no
    /// children.
    pub fn new(name: Option<String>, kind: ModuleKind) -> Self {
        Self {
            name,
            kind,
            children: ItemSet::new(),
        }
    }

    /// Get this module's name, if any.
    pub fn name(&self) -> Option<&str> {
        self.name.as_deref()
    }

    /// Get a mutable reference to this module's children.
    pub fn children_mut(&mut self) -> &mut ItemSet {
        &mut self.children
    }

    /// Get this module's children.
    pub fn children(&self) -> &ItemSet {
        &self.children
    }

    /// Whether this namespace is inline.
    pub fn is_inline(&self) -> bool {
        matches!(self.kind, ModuleKind::Inline)
    }
}
impl DotAttributes for Module {
    /// Emit this module's kind as a graphviz table row.
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(out, "<tr><td>ModuleKind</td><td>{:?}</td></tr>", self.kind)
    }
}
impl ClangSubItemParser for Module {
    /// Parse a C++ namespace: look up (or create) the module for this cursor,
    /// then parse every child of the namespace with that module as the
    /// current parent.
    fn parse(
        cursor: clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Result<ParseResult<Self>, ParseError> {
        use clang_sys::*;
        match cursor.kind() {
            CXCursor_Namespace => {
                // `ctx.module` resolves the module item for this namespace
                // cursor, so by the time we return it is already in the
                // context — hence `AlreadyResolved` below.
                let module_id = ctx.module(cursor);
                ctx.with_module(module_id, |ctx| {
                    cursor.visit(|cursor| {
                        parse_one(ctx, cursor, Some(module_id.into()))
                    })
                });
                Ok(ParseResult::AlreadyResolved(module_id.into()))
            }
            // Not a namespace; let some other parser try this cursor.
            _ => Err(ParseError::Continue),
        }
    }
}

329
third-party/vendor/bindgen/ir/objc.rs vendored Normal file
View file

@ -0,0 +1,329 @@
//! Objective C types
use super::context::{BindgenContext, ItemId};
use super::function::FunctionSig;
use super::item::Item;
use super::traversal::{Trace, Tracer};
use super::ty::TypeKind;
use crate::clang;
use crate::parse::ClangItemParser;
use clang_sys::CXChildVisit_Continue;
use clang_sys::CXCursor_ObjCCategoryDecl;
use clang_sys::CXCursor_ObjCClassMethodDecl;
use clang_sys::CXCursor_ObjCClassRef;
use clang_sys::CXCursor_ObjCInstanceMethodDecl;
use clang_sys::CXCursor_ObjCProtocolDecl;
use clang_sys::CXCursor_ObjCProtocolRef;
use clang_sys::CXCursor_ObjCSuperClassRef;
use clang_sys::CXCursor_TemplateTypeParameter;
use proc_macro2::{Ident, Span, TokenStream};
/// Objective C interface as used in TypeKind
///
/// Also protocols and categories are parsed as this type
#[derive(Debug)]
pub struct ObjCInterface {
    /// The name
    /// like, NSObject
    name: String,
    /// The category name, if this interface was parsed from a category
    /// extension (`@interface NSObject (SomeCategory)`).
    category: Option<String>,
    /// Whether this was parsed from an `@protocol` declaration.
    is_protocol: bool,
    /// The list of template names almost always, ObjectType or KeyType
    pub template_names: Vec<String>,
    /// The list of protocols that this interface conforms to.
    pub conforms_to: Vec<ItemId>,
    /// The direct parent for this interface.
    pub parent_class: Option<ItemId>,
    /// List of the instance methods defined in this interface
    methods: Vec<ObjCMethod>,
    /// List of the class methods defined in this interface.
    class_methods: Vec<ObjCMethod>,
}

/// The objective c methods
#[derive(Debug)]
pub struct ObjCMethod {
    /// The original method selector name
    /// like, dataWithBytes:length:
    name: String,
    /// Method name as converted to rust
    /// like, dataWithBytes_length_
    rust_name: String,
    /// The method's parsed signature.
    signature: FunctionSig,
    /// Is class method?
    is_class_method: bool,
}
impl ObjCInterface {
    /// Create an empty, non-protocol interface with the given name.
    fn new(name: &str) -> ObjCInterface {
        ObjCInterface {
            name: name.to_owned(),
            category: None,
            is_protocol: false,
            template_names: Vec::new(),
            parent_class: None,
            conforms_to: Vec::new(),
            methods: Vec::new(),
            class_methods: Vec::new(),
        }
    }

    /// The name
    /// like, NSObject
    pub fn name(&self) -> &str {
        self.name.as_ref()
    }

    /// Formats the name for rust
    /// Can be like NSObject, but with categories might be like NSObject_NSCoderMethods
    /// and protocols are like PNSObject
    pub fn rust_name(&self) -> String {
        if let Some(ref cat) = self.category {
            format!("{}_{}", self.name(), cat)
        } else if self.is_protocol {
            format!("P{}", self.name())
        } else {
            format!("I{}", self.name().to_owned())
        }
    }

    /// Is this a template interface?
    pub fn is_template(&self) -> bool {
        !self.template_names.is_empty()
    }

    /// List of the methods defined in this interface
    pub fn methods(&self) -> &Vec<ObjCMethod> {
        &self.methods
    }

    /// Is this a protocol?
    pub fn is_protocol(&self) -> bool {
        self.is_protocol
    }

    /// Is this a category?
    pub fn is_category(&self) -> bool {
        self.category.is_some()
    }

    /// List of the class methods defined in this interface
    pub fn class_methods(&self) -> &Vec<ObjCMethod> {
        &self.class_methods
    }

    /// Parses the Objective C interface from the cursor.
    ///
    /// Walks the children of an `@interface`/`@protocol`/`@category`
    /// declaration, collecting template parameters, conformed protocols, the
    /// superclass, and instance/class methods.
    pub fn from_ty(
        cursor: &clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Option<Self> {
        let name = cursor.spelling();
        let mut interface = Self::new(&name);
        if cursor.kind() == CXCursor_ObjCProtocolDecl {
            interface.is_protocol = true;
        }
        cursor.visit(|c| {
            match c.kind() {
                CXCursor_ObjCClassRef => {
                    if cursor.kind() == CXCursor_ObjCCategoryDecl {
                        // We are actually a category extension, and we found the reference
                        // to the original interface, so name this interface appropriately.
                        interface.name = c.spelling();
                        interface.category = Some(cursor.spelling());
                    }
                }
                CXCursor_ObjCProtocolRef => {
                    // Gather protocols this interface conforms to. Protocols
                    // are named `P<name>` by `rust_name` above, so that is
                    // what we search for among the already-parsed items.
                    let needle = format!("P{}", c.spelling());
                    let items_map = ctx.items();
                    debug!(
                        "Interface {} conforms to {}, find the item",
                        interface.name, needle
                    );
                    for (id, item) in items_map {
                        if let Some(ty) = item.as_type() {
                            if let TypeKind::ObjCInterface(ref protocol) =
                                *ty.kind()
                            {
                                if protocol.is_protocol {
                                    debug!(
                                        "Checking protocol {}, ty.name {:?}",
                                        protocol.name,
                                        ty.name()
                                    );
                                    if Some(needle.as_ref()) == ty.name() {
                                        debug!(
                                            "Found conforming protocol {:?}",
                                            item
                                        );
                                        interface.conforms_to.push(id);
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
                CXCursor_ObjCInstanceMethodDecl |
                CXCursor_ObjCClassMethodDecl => {
                    let name = c.spelling();
                    let signature =
                        FunctionSig::from_ty(&c.cur_type(), &c, ctx)
                            .expect("Invalid function sig");
                    let is_class_method =
                        c.kind() == CXCursor_ObjCClassMethodDecl;
                    let method =
                        ObjCMethod::new(&name, signature, is_class_method);
                    interface.add_method(method);
                }
                CXCursor_TemplateTypeParameter => {
                    // e.g. the `ObjectType` in `NSArray<ObjectType>`.
                    let name = c.spelling();
                    interface.template_names.push(name);
                }
                CXCursor_ObjCSuperClassRef => {
                    let item = Item::from_ty_or_ref(c.cur_type(), c, None, ctx);
                    interface.parent_class = Some(item.into());
                }
                _ => {}
            }
            CXChildVisit_Continue
        });
        Some(interface)
    }

    /// File the method into the instance-method or class-method list.
    fn add_method(&mut self, method: ObjCMethod) {
        if method.is_class_method {
            self.class_methods.push(method);
        } else {
            self.methods.push(method);
        }
    }
}
impl ObjCMethod {
    /// Build a method from its selector name, replacing the `:` separators
    /// with `_` to form the Rust-side name.
    fn new(
        name: &str,
        signature: FunctionSig,
        is_class_method: bool,
    ) -> ObjCMethod {
        let split_name: Vec<&str> = name.split(':').collect();
        let rust_name = split_name.join("_");
        ObjCMethod {
            name: name.to_owned(),
            rust_name,
            signature,
            is_class_method,
        }
    }

    /// The original method selector name
    /// like, dataWithBytes:length:
    pub fn name(&self) -> &str {
        self.name.as_ref()
    }

    /// Method name as converted to rust
    /// like, dataWithBytes_length_
    pub fn rust_name(&self) -> &str {
        self.rust_name.as_ref()
    }

    /// Returns the methods signature as FunctionSig
    pub fn signature(&self) -> &FunctionSig {
        &self.signature
    }

    /// Is this a class method?
    pub fn is_class_method(&self) -> bool {
        self.is_class_method
    }

    /// Formats the method call as a token stream suitable for `msg_send!`,
    /// interleaving selector parts with the given argument tokens.
    pub fn format_method_call(&self, args: &[TokenStream]) -> TokenStream {
        // Each selector part becomes an identifier; empty parts (from a
        // leading/trailing/double `:`) become `None`.
        let split_name: Vec<Option<Ident>> = self
            .name
            .split(':')
            .map(|name| {
                if name.is_empty() {
                    None
                } else {
                    Some(Ident::new(name, Span::call_site()))
                }
            })
            .collect();
        // No arguments
        if args.is_empty() && split_name.len() == 1 {
            let name = &split_name[0];
            return quote! {
                #name
            };
        }
        // Check right amount of arguments
        assert!(
            args.len() == split_name.len() - 1,
            "Incorrect method name or arguments for objc method, {:?} vs {:?}",
            args,
            split_name
        );
        // Get arguments without type signatures to pass to `msg_send!`.
        // Each entry of `args` is assumed to render as `name : type` —
        // the first space-separated token is the name.
        let mut args_without_types = vec![];
        for arg in args.iter() {
            let arg = arg.to_string();
            let name_and_sig: Vec<&str> = arg.split(' ').collect();
            let name = name_and_sig[0];
            args_without_types.push(Ident::new(name, Span::call_site()))
        }
        let args = split_name.into_iter().zip(args_without_types).map(
            |(arg, arg_val)| {
                if let Some(arg) = arg {
                    quote! { #arg: #arg_val }
                } else {
                    // NOTE(review): for an empty selector part the value is
                    // emitted as its own label (`x: x`) — presumably what
                    // `msg_send!` expects here; confirm before changing.
                    quote! { #arg_val: #arg_val }
                }
            },
        );
        quote! {
            #( #args )*
        }
    }
}
impl Trace for ObjCInterface {
    type Extra = ();

    /// An interface's outgoing edges are every method signature (instance and
    /// class) plus each protocol it conforms to.
    fn trace<T>(&self, context: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        for method in self.methods.iter().chain(self.class_methods.iter()) {
            method.signature.trace(context, tracer, &());
        }
        for &protocol in &self.conforms_to {
            tracer.visit(protocol);
        }
    }
}

View file

@ -0,0 +1,343 @@
//! Template declaration and instantiation related things.
//!
//! The nomenclature surrounding templates is often confusing, so here are a few
//! brief definitions:
//!
//! * "Template definition": a class/struct/alias/function definition that takes
//! generic template parameters. For example:
//!
//! ```c++
//! template<typename T>
//! class List<T> {
//! // ...
//! };
//! ```
//!
//! * "Template instantiation": an instantiation is a use of a template with
//! concrete template arguments. For example, `List<int>`.
//!
//! * "Template specialization": an alternative template definition providing a
//! custom definition for instantiations with the matching template
//! arguments. This C++ feature is unsupported by bindgen. For example:
//!
//! ```c++
//! template<>
//! class List<int> {
//! // Special layout for int lists...
//! };
//! ```
use super::context::{BindgenContext, ItemId, TypeId};
use super::item::{IsOpaque, Item, ItemAncestors};
use super::traversal::{EdgeKind, Trace, Tracer};
use crate::clang;
use crate::parse::ClangItemParser;
/// Template declaration (and such declaration's template parameters) related
/// methods.
///
/// This trait's methods distinguish between `None` and `Some([])` for
/// declarations that are not templates and template declarations with zero
/// parameters, in general.
///
/// Consider this example:
///
/// ```c++
/// template <typename T, typename U>
/// class Foo {
/// T use_of_t;
/// U use_of_u;
///
/// template <typename V>
/// using Bar = V*;
///
/// class Inner {
/// T x;
/// U y;
/// Bar<int> z;
/// };
///
/// template <typename W>
/// class Lol {
/// // No use of W, but here's a use of T.
/// T t;
/// };
///
/// template <typename X>
/// class Wtf {
/// // X is not used because W is not used.
/// Lol<X> lololol;
/// };
/// };
///
/// class Qux {
/// int y;
/// };
/// ```
///
/// The following table depicts the results of each trait method when invoked on
/// each of the declarations above:
///
/// +------+----------------------+--------------------------+------------------------+----
/// |Decl. | self_template_params | num_self_template_params | all_template_parameters| ...
/// +------+----------------------+--------------------------+------------------------+----
/// |Foo | [T, U] | 2 | [T, U] | ...
/// |Bar | [V] | 1 | [T, U, V] | ...
/// |Inner | [] | 0 | [T, U] | ...
/// |Lol | [W] | 1 | [T, U, W] | ...
/// |Wtf | [X] | 1 | [T, U, X] | ...
/// |Qux | [] | 0 | [] | ...
/// +------+----------------------+--------------------------+------------------------+----
///
/// ----+------+-----+----------------------+
/// ... |Decl. | ... | used_template_params |
/// ----+------+-----+----------------------+
/// ... |Foo | ... | [T, U] |
/// ... |Bar | ... | [V] |
/// ... |Inner | ... | [] |
/// ... |Lol | ... | [T] |
/// ... |Wtf | ... | [T] |
/// ... |Qux | ... | [] |
/// ----+------+-----+----------------------+
pub trait TemplateParameters: Sized {
    /// Get the set of `ItemId`s that make up this template declaration's free
    /// template parameters.
    ///
    /// Note that these might *not* all be named types: C++ allows
    /// constant-value template parameters as well as template-template
    /// parameters. Of course, Rust does not allow generic parameters to be
    /// anything but types, so we must treat them as opaque, and avoid
    /// instantiating them.
    fn self_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>;

    /// Get the number of free template parameters this template declaration
    /// has.
    fn num_self_template_params(&self, ctx: &BindgenContext) -> usize {
        self.self_template_params(ctx).len()
    }

    /// Get the complete set of template parameters that can affect this
    /// declaration.
    ///
    /// Note that this item doesn't need to be a template declaration itself for
    /// `Some` to be returned here (in contrast to `self_template_params`). If
    /// this item is a member of a template declaration, then the parent's
    /// template parameters are included here.
    ///
    /// In the example above, `Inner` depends on both of the `T` and `U` type
    /// parameters, even though it is not itself a template declaration and
    /// therefore has no type parameters itself. Perhaps it helps to think about
    /// how we would fully reference such a member type in C++:
    /// `Foo<int,char>::Inner`. `Foo` *must* be instantiated with template
    /// arguments before we can gain access to the `Inner` member type.
    fn all_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>
    where
        Self: ItemAncestors,
    {
        // Walk ancestors from the outermost inwards, so outer parameters come
        // first in the resulting list.
        let mut ancestors: Vec<_> = self.ancestors(ctx).collect();
        ancestors.reverse();
        ancestors
            .into_iter()
            .flat_map(|id| id.self_template_params(ctx).into_iter())
            .collect()
    }

    /// Get only the set of template parameters that this item uses. This is a
    /// subset of `all_template_params` and does not necessarily contain any of
    /// `self_template_params`.
    fn used_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>
    where
        Self: AsRef<ItemId>,
    {
        // Usage is a whole-graph analysis that only runs before codegen.
        assert!(
            ctx.in_codegen_phase(),
            "template parameter usage is not computed until codegen"
        );
        let id = *self.as_ref();
        ctx.resolve_item(id)
            .all_template_params(ctx)
            .into_iter()
            .filter(|p| ctx.uses_template_parameter(id, *p))
            .collect()
    }
}
/// A trait for things which may or may not be a named template type parameter.
pub trait AsTemplateParam {
    /// Any extra information the implementor might need to make this decision.
    type Extra;

    /// Convert this thing to the item id of a named template type parameter.
    fn as_template_param(
        &self,
        ctx: &BindgenContext,
        extra: &Self::Extra,
    ) -> Option<TypeId>;

    /// Is this a named template type parameter?
    fn is_template_param(
        &self,
        ctx: &BindgenContext,
        extra: &Self::Extra,
    ) -> bool {
        self.as_template_param(ctx, extra).is_some()
    }
}
/// A concrete instantiation of a generic template, e.g. `List<int>` for the
/// definition `template<typename T> class List`.
#[derive(Clone, Debug)]
pub struct TemplateInstantiation {
    /// The template definition which this is instantiating.
    definition: TypeId,
    /// The concrete template arguments, which will be substituted in the
    /// definition for the generic template parameters.
    args: Vec<TypeId>,
}
impl TemplateInstantiation {
    /// Construct a new template instantiation from the given parts.
    pub fn new<I>(definition: TypeId, args: I) -> TemplateInstantiation
    where
        I: IntoIterator<Item = TypeId>,
    {
        TemplateInstantiation {
            definition,
            args: args.into_iter().collect(),
        }
    }

    /// Get the template definition for this instantiation.
    pub fn template_definition(&self) -> TypeId {
        self.definition
    }

    /// Get the concrete template arguments used in this instantiation.
    pub fn template_arguments(&self) -> &[TypeId] {
        &self.args[..]
    }

    /// Parse a `TemplateInstantiation` from a clang `Type`.
    ///
    /// Returns `None` when no template definition can be found for the
    /// instantiation.
    pub fn from_ty(
        ty: &clang::Type,
        ctx: &mut BindgenContext,
    ) -> Option<TemplateInstantiation> {
        use clang_sys::*;
        // When the canonical type has more arguments than the sugared type,
        // take the extra trailing arguments from the canonical list —
        // presumably these are defaulted arguments the sugar omits (TODO
        // confirm against upstream).
        let template_args = ty.template_args().map_or(vec![], |args| match ty
            .canonical_type()
            .template_args()
        {
            Some(canonical_args) => {
                let arg_count = args.len();
                args.chain(canonical_args.skip(arg_count))
                    .filter(|t| t.kind() != CXType_Invalid)
                    .map(|t| {
                        Item::from_ty_or_ref(t, t.declaration(), None, ctx)
                    })
                    .collect()
            }
            None => args
                .filter(|t| t.kind() != CXType_Invalid)
                .map(|t| Item::from_ty_or_ref(t, t.declaration(), None, ctx))
                .collect(),
        });
        let declaration = ty.declaration();
        let definition = if declaration.kind() == CXCursor_TypeAliasTemplateDecl
        {
            // For a template alias the declaration cursor is the definition.
            Some(declaration)
        } else {
            declaration.specialized().or_else(|| {
                let mut template_ref = None;
                ty.declaration().visit(|child| {
                    if child.kind() == CXCursor_TemplateRef {
                        template_ref = Some(child);
                        return CXVisit_Break;
                    }
                    // Instantiations of template aliases might have the
                    // TemplateRef to the template alias definition arbitrarily
                    // deep, so we need to recurse here and not only visit
                    // direct children.
                    CXChildVisit_Recurse
                });
                template_ref.and_then(|cur| cur.referenced())
            })
        };
        let definition = match definition {
            Some(def) => def,
            None => {
                // Builtins legitimately have no user-visible definition, so
                // only warn for non-builtin declarations.
                if !ty.declaration().is_builtin() {
                    warn!(
                        "Could not find template definition for template \
                         instantiation"
                    );
                }
                return None;
            }
        };
        let template_definition =
            Item::from_ty_or_ref(definition.cur_type(), definition, None, ctx);
        Some(TemplateInstantiation::new(
            template_definition,
            template_args,
        ))
    }
}
impl IsOpaque for TemplateInstantiation {
    type Extra = Item;

    /// Is this an opaque template instantiation?
    ///
    /// True if either the template definition itself is opaque, or the
    /// instantiation's rendered name (`Path<Args...>`) is explicitly marked
    /// opaque by name.
    fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool {
        if self.template_definition().is_opaque(ctx, &()) {
            return true;
        }
        // TODO(#774): This doesn't properly handle opaque instantiations where
        // an argument is itself an instantiation because `canonical_name` does
        // not insert the template arguments into the name, ie it for nested
        // template arguments it creates "Foo" instead of "Foo<int>". The fully
        // correct fix is to make `canonical_{name,path}` include template
        // arguments properly.
        let mut path = item.path_for_allowlisting(ctx).clone();
        let args: Vec<_> = self
            .template_arguments()
            .iter()
            .map(|arg| {
                let arg_path =
                    ctx.resolve_item(*arg).path_for_allowlisting(ctx);
                arg_path[1..].join("::")
            })
            .collect();
        // Append "<Arg1, Arg2, ...>" to the last path component before the
        // opaque-by-name lookup.
        {
            let last = path.last_mut().unwrap();
            last.push('<');
            last.push_str(&args.join(", "));
            last.push('>');
        }
        ctx.opaque_by_name(&path)
    }
}
impl Trace for TemplateInstantiation {
    type Extra = ();

    /// An instantiation's outgoing edges are the template definition plus
    /// every concrete template argument.
    fn trace<T>(&self, _ctx: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        tracer
            .visit_kind(self.definition.into(), EdgeKind::TemplateDeclaration);
        self.template_arguments().iter().for_each(|arg| {
            tracer.visit_kind(arg.into(), EdgeKind::TemplateArgument)
        });
    }
}

View file

@ -0,0 +1,478 @@
//! Traversal of the graph of IR items and types.
use super::context::{BindgenContext, ItemId};
use super::item::ItemSet;
use std::collections::{BTreeMap, VecDeque};
/// An outgoing edge in the IR graph is a reference from some item to another
/// item:
///
///   from --> to
///
/// The `from` is left implicit: it is the concrete `Trace` implementer which
/// yielded this outgoing edge.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Edge {
    /// The item this edge points at.
    to: ItemId,
    /// What kind of reference this edge represents.
    kind: EdgeKind,
}

impl Edge {
    /// Construct a new edge whose referent is `to` and is of the given `kind`.
    pub fn new(to: ItemId, kind: EdgeKind) -> Edge {
        Edge { to, kind }
    }
}

impl From<Edge> for ItemId {
    /// An edge converts to the item it points at.
    fn from(val: Edge) -> Self {
        val.to
    }
}
/// The kind of edge reference. This is useful when we wish to only consider
/// certain kinds of edges for a particular traversal or analysis.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum EdgeKind {
    /// A generic, catch-all edge.
    Generic,

    /// An edge from a template declaration, to the definition of a named type
    /// parameter. For example, the edge from `Foo<T>` to `T` in the following
    /// snippet:
    ///
    /// ```C++
    /// template<typename T>
    /// class Foo { };
    /// ```
    TemplateParameterDefinition,

    /// An edge from a template instantiation to the template declaration that
    /// is being instantiated. For example, the edge from `Foo<int>`
    /// to `Foo<T>`:
    ///
    /// ```C++
    /// template<typename T>
    /// class Foo { };
    ///
    /// using Bar = Foo<int>;
    /// ```
    TemplateDeclaration,

    /// An edge from a template instantiation to its template argument. For
    /// example, `Foo<Bar>` to `Bar`:
    ///
    /// ```C++
    /// template<typename T>
    /// class Foo { };
    ///
    /// class Bar { };
    ///
    /// using FooBar = Foo<Bar>;
    /// ```
    TemplateArgument,

    /// An edge from a compound type to one of its base member types. For
    /// example, the edge from `Bar` to `Foo`:
    ///
    /// ```C++
    /// class Foo { };
    ///
    /// class Bar : public Foo { };
    /// ```
    BaseMember,

    /// An edge from a compound type to the types of one of its fields. For
    /// example, the edge from `Foo` to `int`:
    ///
    /// ```C++
    /// class Foo {
    ///     int x;
    /// };
    /// ```
    Field,

    /// An edge from a class or struct type to an inner type member. For
    /// example, the edge from `Foo` to `Foo::Bar` here:
    ///
    /// ```C++
    /// class Foo {
    ///     struct Bar { };
    /// };
    /// ```
    InnerType,

    /// An edge from a class or struct type to an inner static variable. For
    /// example, the edge from `Foo` to `Foo::BAR` here:
    ///
    /// ```C++
    /// class Foo {
    ///     static const char* BAR;
    /// };
    /// ```
    InnerVar,

    /// An edge from a class or struct type to one of its method functions. For
    /// example, the edge from `Foo` to `Foo::bar`:
    ///
    /// ```C++
    /// class Foo {
    ///     bool bar(int x, int y);
    /// };
    /// ```
    Method,

    /// An edge from a class or struct type to one of its constructor
    /// functions. For example, the edge from `Foo` to `Foo::Foo(int x, int y)`:
    ///
    /// ```C++
    /// class Foo {
    ///     int my_x;
    ///     int my_y;
    ///
    ///   public:
    ///     Foo(int x, int y);
    /// };
    /// ```
    Constructor,

    /// An edge from a class or struct type to its destructor function. For
    /// example, the edge from `Doggo` to `Doggo::~Doggo()`:
    ///
    /// ```C++
    /// struct Doggo {
    ///     char* wow;
    ///
    ///   public:
    ///     ~Doggo();
    /// };
    /// ```
    Destructor,

    /// An edge from a function declaration to its return type. For example, the
    /// edge from `foo` to `int`:
    ///
    /// ```C++
    /// int foo(char* string);
    /// ```
    FunctionReturn,

    /// An edge from a function declaration to one of its parameter types. For
    /// example, the edge from `foo` to `char*`:
    ///
    /// ```C++
    /// int foo(char* string);
    /// ```
    FunctionParameter,

    /// An edge from a static variable to its type. For example, the edge from
    /// `FOO` to `const char*`:
    ///
    /// ```C++
    /// static const char* FOO;
    /// ```
    VarType,

    /// An edge from a non-templated alias or typedef to the referenced type.
    TypeReference,
}
/// A predicate to allow visiting only sub-sets of the whole IR graph by
/// excluding certain edges from being followed by the traversal.
///
/// The predicate must return true if the traversal should follow this edge
/// and visit everything that is reachable through it.
pub type TraversalPredicate = for<'a> fn(&'a BindgenContext, Edge) -> bool;
/// A `TraversalPredicate` that unconditionally follows every edge, so a
/// traversal using it sees the entire IR graph that is reachable from the
/// traversal's roots.
pub fn all_edges(_ctx: &BindgenContext, _edge: Edge) -> bool {
    true
}
/// A `TraversalPredicate` that follows only `EdgeKind::InnerType` edges, so a
/// traversal using it visits just the roots and their inner types. This is
/// used in no-recursive-allowlist mode, where inner types such as anonymous
/// structs/unions still need to be processed.
pub fn only_inner_type_edges(_ctx: &BindgenContext, edge: Edge) -> bool {
    matches!(edge.kind, EdgeKind::InnerType)
}
/// A `TraversalPredicate` that follows only edges to items that are enabled
/// for code generation. This lets us skip considering items that are not
/// reachable from code generation.
pub fn codegen_edges(ctx: &BindgenContext, edge: Edge) -> bool {
    let cc = &ctx.options().codegen_config;

    match edge.kind {
        // A generic edge may point at any kind of item, so we must resolve
        // the target and ask it directly.
        EdgeKind::Generic => {
            ctx.resolve_item(edge.to).is_enabled_for_codegen(ctx)
        }

        // For all remaining edge kinds, the kind of item pointed to is known
        // statically, so consulting the codegen configuration is enough; no
        // item resolution or `Item::is_enabled_for_codegen` call is needed.
        EdgeKind::InnerVar => cc.vars(),
        EdgeKind::Method => cc.methods(),
        EdgeKind::Constructor => cc.constructors(),
        EdgeKind::Destructor => cc.destructors(),
        EdgeKind::TemplateParameterDefinition |
        EdgeKind::TemplateArgument |
        EdgeKind::TemplateDeclaration |
        EdgeKind::BaseMember |
        EdgeKind::Field |
        EdgeKind::InnerType |
        EdgeKind::FunctionReturn |
        EdgeKind::FunctionParameter |
        EdgeKind::VarType |
        EdgeKind::TypeReference => cc.types(),
    }
}
/// The storage for the set of items that have been seen (although their
/// outgoing edges might not have been fully traversed yet) in an active
/// traversal.
pub trait TraversalStorage<'ctx> {
    /// Construct a new instance of this TraversalStorage, for a new traversal.
    fn new(ctx: &'ctx BindgenContext) -> Self;

    /// Add the given item to the storage. If the item has never been seen
    /// before, return `true`. Otherwise, return `false`.
    ///
    /// The `from` item is the item from which we discovered this item, or is
    /// `None` if this item is a root.
    fn add(&mut self, from: Option<ItemId>, item: ItemId) -> bool;
}
/// The simplest traversal storage: a plain set of seen ids, with no
/// provenance tracking.
impl<'ctx> TraversalStorage<'ctx> for ItemSet {
    fn new(_ctx: &'ctx BindgenContext) -> Self {
        ItemSet::new()
    }

    fn add(&mut self, _from: Option<ItemId>, item: ItemId) -> bool {
        // `insert` returns true iff the id was not already present.
        self.insert(item)
    }
}
/// A `TraversalStorage` implementation that keeps track of how we first reached
/// each item. This is useful for providing debug assertions with meaningful
/// diagnostic messages about dangling items.
///
/// The map records, for each seen item, the item it was first discovered
/// from; roots map to themselves.
#[derive(Debug)]
pub struct Paths<'ctx>(BTreeMap<ItemId, ItemId>, &'ctx BindgenContext);
impl<'ctx> TraversalStorage<'ctx> for Paths<'ctx> {
    fn new(ctx: &'ctx BindgenContext) -> Self {
        Paths(BTreeMap::new(), ctx)
    }

    fn add(&mut self, from: Option<ItemId>, item: ItemId) -> bool {
        // Roots record themselves as their own predecessor.
        let first_time_seen =
            self.0.insert(item, from.unwrap_or(item)).is_none();

        if self.1.resolve_item_fallible(item).is_some() {
            return first_time_seen;
        }

        // The id does not resolve to an item in the context: it is dangling.
        // Walk the predecessor map back to a root so the panic message shows
        // how we reached the bad id.
        let mut path = Vec::new();
        let mut current = item;
        loop {
            let predecessor = *self.0.get(&current).expect(
                "We know we found this item id, so it must have a \
                 predecessor",
            );
            if predecessor == current {
                break;
            }
            path.push(predecessor);
            current = predecessor;
        }
        path.reverse();
        panic!(
            "Found reference to dangling id = {:?}\nvia path = {:?}",
            item, path
        );
    }
}
/// The queue of seen-but-not-yet-traversed items.
///
/// Using a FIFO queue with a traversal will yield a breadth-first traversal,
/// while using a LIFO queue will result in a depth-first traversal of the IR
/// graph.
///
/// See the `Vec<ItemId>` (LIFO) and `VecDeque<ItemId>` (FIFO) implementations
/// below.
pub trait TraversalQueue: Default {
    /// Add a newly discovered item to the queue.
    fn push(&mut self, item: ItemId);

    /// Pop the next item to traverse, if any.
    fn next(&mut self) -> Option<ItemId>;
}
/// A LIFO queue: `next` pops from the back, so traversals using
/// `Vec<ItemId>` are depth-first.
impl TraversalQueue for Vec<ItemId> {
    fn push(&mut self, item: ItemId) {
        // Resolves to the inherent `Vec::push` (inherent methods take
        // precedence over trait methods), so this is not a recursive call.
        self.push(item);
    }

    fn next(&mut self) -> Option<ItemId> {
        self.pop()
    }
}
/// A FIFO queue: `push` appends at the back and `next` pops from the front,
/// so traversals using `VecDeque<ItemId>` are breadth-first.
impl TraversalQueue for VecDeque<ItemId> {
    fn push(&mut self, item: ItemId) {
        self.push_back(item);
    }

    fn next(&mut self) -> Option<ItemId> {
        self.pop_front()
    }
}
/// Something that can receive edges from a `Trace` implementation.
///
/// Implementors only need to provide `visit_kind`; `visit` has a default
/// implementation.
pub trait Tracer {
    /// Note an edge between items. Called from within a `Trace` implementation.
    fn visit_kind(&mut self, item: ItemId, kind: EdgeKind);

    /// A synonym for `tracer.visit_kind(item, EdgeKind::Generic)`.
    fn visit(&mut self, item: ItemId) {
        self.visit_kind(item, EdgeKind::Generic);
    }
}
/// Any `FnMut(ItemId, EdgeKind)` closure can be used directly as a `Tracer`.
impl<F> Tracer for F
where
    F: FnMut(ItemId, EdgeKind),
{
    fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) {
        // Calls the closure through the `FnMut` impl for `&mut F`.
        self(item, kind)
    }
}
/// Trace all of the outgoing edges to other items. Implementations should call
/// one of `tracer.visit(edge)` or `tracer.visit_kind(edge, EdgeKind::Whatever)`
/// for each of their outgoing edges.
///
/// Called by traversals such as `ItemTraversal` to discover each item's
/// outgoing edges.
pub trait Trace {
    /// If a particular type needs extra information beyond what it has in
    /// `self` and `context` to find its referenced items, its implementation
    /// can define this associated type, forcing callers to pass the needed
    /// information through.
    type Extra;

    /// Trace all of this item's outgoing edges to other items.
    fn trace<T>(
        &self,
        context: &BindgenContext,
        tracer: &mut T,
        extra: &Self::Extra,
    ) where
        T: Tracer;
}
/// A graph traversal of the transitive closure of references between items.
///
/// See `BindgenContext::allowlisted_items` for more information.
pub struct ItemTraversal<'ctx, Storage, Queue>
where
    Storage: TraversalStorage<'ctx>,
    Queue: TraversalQueue,
{
    ctx: &'ctx BindgenContext,

    /// The set of items we have seen thus far in this traversal.
    seen: Storage,

    /// The set of items that we have seen, but have yet to traverse.
    queue: Queue,

    /// The predicate that determines which edges this traversal will follow.
    predicate: TraversalPredicate,

    /// The item we are currently traversing. Used as the `from` argument when
    /// recording newly discovered items in `seen`.
    currently_traversing: Option<ItemId>,
}
impl<'ctx, Storage, Queue> ItemTraversal<'ctx, Storage, Queue>
where
    Storage: TraversalStorage<'ctx>,
    Queue: TraversalQueue,
{
    /// Begin a new traversal, starting from the given roots.
    pub fn new<R>(
        ctx: &'ctx BindgenContext,
        roots: R,
        predicate: TraversalPredicate,
    ) -> ItemTraversal<'ctx, Storage, Queue>
    where
        R: IntoIterator<Item = ItemId>,
    {
        let mut seen = Storage::new(ctx);
        let mut queue = Queue::default();

        // Roots have no predecessor, hence `None`.
        roots.into_iter().for_each(|id| {
            seen.add(None, id);
            queue.push(id);
        });

        ItemTraversal {
            ctx,
            seen,
            queue,
            predicate,
            currently_traversing: None,
        }
    }
}
impl<'ctx, Storage, Queue> Tracer for ItemTraversal<'ctx, Storage, Queue>
where
    Storage: TraversalStorage<'ctx>,
    Queue: TraversalQueue,
{
    fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) {
        let followable = (self.predicate)(self.ctx, Edge::new(item, kind));
        // Only enqueue items the predicate allows, and only the first time we
        // see them; `seen.add` records the current item as the predecessor.
        if followable && self.seen.add(self.currently_traversing, item) {
            self.queue.push(item);
        }
    }
}
impl<'ctx, Storage, Queue> Iterator for ItemTraversal<'ctx, Storage, Queue>
where
    Storage: TraversalStorage<'ctx>,
    Queue: TraversalQueue,
{
    type Item = ItemId;

    fn next(&mut self) -> Option<Self::Item> {
        let id = match self.queue.next() {
            None => return None,
            Some(id) => id,
        };

        let newly_discovered = self.seen.add(None, id);
        debug_assert!(
            !newly_discovered,
            "should have already seen anything we get out of our queue"
        );
        debug_assert!(
            self.ctx.resolve_item_fallible(id).is_some(),
            "should only get IDs of actual items in our context during traversal"
        );

        // Remember which item we are tracing, so that `visit_kind` records it
        // as the predecessor of anything it discovers.
        self.currently_traversing = Some(id);
        id.trace(self.ctx, self, &());
        self.currently_traversing = None;

        Some(id)
    }
}
/// An iterator to find any dangling items.
///
/// See `BindgenContext::assert_no_dangling_item_traversal` for more
/// information.
///
/// Uses `Paths` storage so a dangling reference panics with the discovery
/// path (see `Paths::add`), and a FIFO queue for breadth-first order.
pub type AssertNoDanglingItemsTraversal<'ctx> =
    ItemTraversal<'ctx, Paths<'ctx>, VecDeque<ItemId>>;

1287
third-party/vendor/bindgen/ir/ty.rs vendored Normal file

File diff suppressed because it is too large Load diff

414
third-party/vendor/bindgen/ir/var.rs vendored Normal file
View file

@ -0,0 +1,414 @@
//! Intermediate representation of variables.
use super::super::codegen::MacroTypeVariation;
use super::context::{BindgenContext, TypeId};
use super::dot::DotAttributes;
use super::function::cursor_mangling;
use super::int::IntKind;
use super::item::Item;
use super::ty::{FloatKind, TypeKind};
use crate::callbacks::MacroParsingBehavior;
use crate::clang;
use crate::clang::ClangToken;
use crate::parse::{
ClangItemParser, ClangSubItemParser, ParseError, ParseResult,
};
use cexpr;
use std::io;
use std::num::Wrapping;
/// The type for a constant variable.
///
/// Also used for the evaluated values of macro definitions (see the
/// `ClangSubItemParser` implementation for `Var`).
#[derive(Debug)]
pub enum VarType {
    /// A boolean.
    Bool(bool),
    /// An integer.
    Int(i64),
    /// A floating point number.
    Float(f64),
    /// A character.
    Char(u8),
    /// A string, not necessarily well-formed utf-8.
    String(Vec<u8>),
}
/// A `Var` is our intermediate representation of a variable.
#[derive(Debug)]
pub struct Var {
    /// The name of the variable.
    name: String,
    /// The mangled name of the variable, if any.
    mangled_name: Option<String>,
    /// The type of the variable.
    ty: TypeId,
    /// The value of the variable, that needs to be suitable for `ty`.
    /// `None` when the value is unknown or not a compile-time constant.
    val: Option<VarType>,
    /// Whether this variable is const.
    is_const: bool,
}
impl Var {
    /// Construct a new `Var`.
    ///
    /// # Panics
    ///
    /// Panics if `name` is empty.
    pub fn new(
        name: String,
        mangled_name: Option<String>,
        ty: TypeId,
        val: Option<VarType>,
        is_const: bool,
    ) -> Var {
        assert!(!name.is_empty());
        Var {
            name,
            mangled_name,
            ty,
            val,
            is_const,
        }
    }

    /// Get this variable's name.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Get this variable's mangled name, if it has one.
    pub fn mangled_name(&self) -> Option<&str> {
        self.mangled_name.as_deref()
    }

    /// Get this variable's type.
    pub fn ty(&self) -> TypeId {
        self.ty
    }

    /// The value of this constant variable, if any.
    pub fn val(&self) -> Option<&VarType> {
        self.val.as_ref()
    }

    /// Is this variable `const` qualified?
    pub fn is_const(&self) -> bool {
        self.is_const
    }
}
impl DotAttributes for Var {
    /// Emit graphviz table rows for this variable's const-ness and mangled
    /// name (when present).
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        if self.is_const {
            writeln!(out, "<tr><td>const</td><td>true</td></tr>")?;
        }

        match self.mangled_name {
            Some(ref mangled) => writeln!(
                out,
                "<tr><td>mangled name</td><td>{}</td></tr>",
                mangled
            ),
            None => Ok(()),
        }
    }
}
/// Pick the integer kind for a macro constant, honoring the
/// `default_macro_constant_type` and `fit_macro_constants` options.
///
/// Negative values (or the `Signed` option) get the smallest signed kind that
/// holds `value`; otherwise the smallest unsigned kind. Without
/// `fit_macro_constants`, the choice widens to at least 32 bits.
fn default_macro_constant_type(ctx: &BindgenContext, value: i64) -> IntKind {
    let signed = value < 0 ||
        ctx.options().default_macro_constant_type ==
            MacroTypeVariation::Signed;
    let fit = ctx.options().fit_macro_constants;

    if signed {
        if !(i32::MIN as i64..=i32::MAX as i64).contains(&value) {
            IntKind::I64
        } else if !fit || !(i16::MIN as i64..=i16::MAX as i64).contains(&value)
        {
            IntKind::I32
        } else if !(i8::MIN as i64..=i8::MAX as i64).contains(&value) {
            IntKind::I16
        } else {
            IntKind::I8
        }
    } else if value > u32::MAX as i64 {
        IntKind::U64
    } else if !fit || value > u16::MAX as i64 {
        IntKind::U32
    } else if value > u8::MAX as i64 {
        IntKind::U16
    } else {
        IntKind::U8
    }
}
/// Parses tokens from a CXCursor_MacroDefinition pointing into a function-like
/// macro, and calls the func_macro callback.
fn handle_function_macro(
cursor: &clang::Cursor,
callbacks: &dyn crate::callbacks::ParseCallbacks,
) {
let is_closing_paren = |t: &ClangToken| {
// Test cheap token kind before comparing exact spellings.
t.kind == clang_sys::CXToken_Punctuation && t.spelling() == b")"
};
let tokens: Vec<_> = cursor.tokens().iter().collect();
if let Some(boundary) = tokens.iter().position(is_closing_paren) {
let mut spelled = tokens.iter().map(ClangToken::spelling);
// Add 1, to convert index to length.
let left = spelled.by_ref().take(boundary + 1);
let left = left.collect::<Vec<_>>().concat();
if let Ok(left) = String::from_utf8(left) {
let right: Vec<_> = spelled.collect();
callbacks.func_macro(&left, &right);
}
}
}
impl ClangSubItemParser for Var {
    /// Parse a `Var` from either a macro definition or a variable
    /// declaration cursor. Function-like macros are forwarded to the
    /// `func_macro` callback instead of producing a `Var`.
    fn parse(
        cursor: clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Result<ParseResult<Self>, ParseError> {
        use cexpr::expr::EvalResult;
        use cexpr::literal::CChar;
        use clang_sys::*;
        match cursor.kind() {
            CXCursor_MacroDefinition => {
                // Give every registered callback a chance to skip this macro
                // or to consume it as a function-like macro.
                for callbacks in &ctx.options().parse_callbacks {
                    match callbacks.will_parse_macro(&cursor.spelling()) {
                        MacroParsingBehavior::Ignore => {
                            return Err(ParseError::Continue);
                        }
                        MacroParsingBehavior::Default => {}
                    }
                    if cursor.is_macro_function_like() {
                        handle_function_macro(&cursor, callbacks.as_ref());
                        // We handled the macro, skip macro processing below.
                        return Err(ParseError::Continue);
                    }
                }
                // Evaluate the macro with cexpr, using previously parsed
                // macros as the identifier environment.
                let value = parse_macro(ctx, &cursor);
                let (id, value) = match value {
                    Some(v) => v,
                    None => return Err(ParseError::Continue),
                };
                assert!(!id.is_empty(), "Empty macro name?");
                let previously_defined = ctx.parsed_macro(&id);
                // NB: It's important to "note" the macro even if the result is
                // not an integer, otherwise we might lose other kinds of
                // derived macros.
                ctx.note_parsed_macro(id.clone(), value.clone());
                if previously_defined {
                    let name = String::from_utf8(id).unwrap();
                    warn!("Duplicated macro definition: {}", name);
                    return Err(ParseError::Continue);
                }
                // NOTE: Unwrapping, here and above, is safe, because the
                // identifier of a token comes straight from clang, and we
                // enforce utf8 there, so we should have already panicked at
                // this point.
                let name = String::from_utf8(id).unwrap();
                // Map the cexpr evaluation result to a bindgen type kind and
                // a `VarType` value.
                let (type_kind, val) = match value {
                    EvalResult::Invalid => return Err(ParseError::Continue),
                    EvalResult::Float(f) => {
                        (TypeKind::Float(FloatKind::Double), VarType::Float(f))
                    }
                    EvalResult::Char(c) => {
                        let c = match c {
                            CChar::Char(c) => {
                                // Only single-byte character literals are
                                // representable as a C `char`.
                                assert_eq!(c.len_utf8(), 1);
                                c as u8
                            }
                            CChar::Raw(c) => {
                                assert!(c <= ::std::u8::MAX as u64);
                                c as u8
                            }
                        };
                        (TypeKind::Int(IntKind::U8), VarType::Char(c))
                    }
                    EvalResult::Str(val) => {
                        // String macros become `const u8` pointers; notify
                        // the `str_macro` callbacks first.
                        let char_ty = Item::builtin_type(
                            TypeKind::Int(IntKind::U8),
                            true,
                            ctx,
                        );
                        for callbacks in &ctx.options().parse_callbacks {
                            callbacks.str_macro(&name, &val);
                        }
                        (TypeKind::Pointer(char_ty), VarType::String(val))
                    }
                    EvalResult::Int(Wrapping(value)) => {
                        // Callbacks may override the integer kind; otherwise
                        // fall back to the configured default.
                        let kind = ctx
                            .options()
                            .last_callback(|c| c.int_macro(&name, value))
                            .unwrap_or_else(|| {
                                default_macro_constant_type(ctx, value)
                            });
                        (TypeKind::Int(kind), VarType::Int(value))
                    }
                };
                let ty = Item::builtin_type(type_kind, true, ctx);
                Ok(ParseResult::New(
                    Var::new(name, None, ty, Some(val), true),
                    Some(cursor),
                ))
            }
            CXCursor_VarDecl => {
                let name = cursor.spelling();
                if name.is_empty() {
                    warn!("Empty constant name?");
                    return Err(ParseError::Continue);
                }
                let ty = cursor.cur_type();
                // TODO(emilio): do we have to special-case constant arrays in
                // some other places?
                let is_const = ty.is_const() ||
                    ([CXType_ConstantArray, CXType_IncompleteArray]
                        .contains(&ty.kind()) &&
                        ty.elem_type()
                            .map_or(false, |element| element.is_const()));
                let ty = match Item::from_ty(&ty, cursor, None, ctx) {
                    Ok(ty) => ty,
                    Err(e) => {
                        assert!(
                            matches!(ty.kind(), CXType_Auto | CXType_Unexposed),
                            "Couldn't resolve constant type, and it \
                             wasn't an nondeductible auto type or unexposed \
                             type!"
                        );
                        return Err(e);
                    }
                };
                // Note: Ty might not be totally resolved yet, see
                // tests/headers/inner_const.hpp
                //
                // That's fine because in that case we know it's not a literal.
                let canonical_ty = ctx
                    .safe_resolve_type(ty)
                    .and_then(|t| t.safe_canonical_type(ctx));
                let is_integer = canonical_ty.map_or(false, |t| t.is_integer());
                let is_float = canonical_ty.map_or(false, |t| t.is_float());
                // TODO: We could handle `char` more gracefully.
                // TODO: Strings, though the lookup is a bit more hard (we need
                // to look at the canonical type of the pointee too, and check
                // is char, u8, or i8 I guess).
                let value = if is_integer {
                    let kind = match *canonical_ty.unwrap().kind() {
                        TypeKind::Int(kind) => kind,
                        _ => unreachable!(),
                    };
                    // Prefer clang's evaluator; fall back to scraping the
                    // literal tokens when the sign doesn't match.
                    let mut val = cursor.evaluate().and_then(|v| v.as_int());
                    if val.is_none() || !kind.signedness_matches(val.unwrap()) {
                        val = get_integer_literal_from_cursor(&cursor);
                    }
                    val.map(|val| {
                        if kind == IntKind::Bool {
                            VarType::Bool(val != 0)
                        } else {
                            VarType::Int(val)
                        }
                    })
                } else if is_float {
                    cursor
                        .evaluate()
                        .and_then(|v| v.as_double())
                        .map(VarType::Float)
                } else {
                    cursor
                        .evaluate()
                        .and_then(|v| v.as_literal_string())
                        .map(VarType::String)
                };
                let mangling = cursor_mangling(ctx, &cursor);
                let var = Var::new(name, mangling, ty, value, is_const);
                Ok(ParseResult::New(var, Some(cursor)))
            }
            _ => {
                /* TODO */
                Err(ParseError::Continue)
            }
        }
    }
}
/// Try and parse a macro using all the macros parsed until now.
///
/// Returns the macro's identifier bytes and its evaluated result, or `None`
/// if cexpr could not parse the definition.
fn parse_macro(
    ctx: &BindgenContext,
    cursor: &clang::Cursor,
) -> Option<(Vec<u8>, cexpr::expr::EvalResult)> {
    use cexpr::expr;

    let cexpr_tokens = cursor.cexpr_tokens();
    let parser = expr::IdentifierParser::new(ctx.parsed_macros());

    parser
        .macro_definition(&cexpr_tokens)
        .ok()
        .map(|(_, (id, val))| (id.into(), val))
}
/// Evaluate the cursor's tokens as a constant expression, returning the value
/// only when it evaluates to an integer.
fn parse_int_literal_tokens(cursor: &clang::Cursor) -> Option<i64> {
    use cexpr::expr;
    use cexpr::expr::EvalResult;

    // TODO(emilio): We can try to parse other kinds of literals.
    let cexpr_tokens = cursor.cexpr_tokens();
    if let Ok((_, EvalResult::Int(Wrapping(val)))) = expr::expr(&cexpr_tokens) {
        Some(val)
    } else {
        None
    }
}
/// Search the cursor's children for an integer literal (possibly behind a
/// unary operator or an unexposed expression) and return its value.
fn get_integer_literal_from_cursor(cursor: &clang::Cursor) -> Option<i64> {
    use clang_sys::*;

    let mut value = None;
    cursor.visit(|c| {
        match c.kind() {
            CXCursor_IntegerLiteral | CXCursor_UnaryOperator => {
                value = parse_int_literal_tokens(&c)
            }
            // Clang sometimes wraps the literal in an unexposed expression;
            // recurse to look inside it.
            CXCursor_UnexposedExpr => {
                value = get_integer_literal_from_cursor(&c)
            }
            _ => {}
        }

        // Stop visiting as soon as we have found a value.
        match value {
            Some(_) => CXChildVisit_Break,
            None => CXChildVisit_Continue,
        }
    });
    value
}

3004
third-party/vendor/bindgen/lib.rs vendored Normal file

File diff suppressed because it is too large Load diff

32
third-party/vendor/bindgen/log_stubs.rs vendored Normal file
View file

@ -0,0 +1,32 @@
#![allow(unused)]

// No-op stand-ins for the `log` crate's macros, used when bindgen is built
// without logging support. Arguments are still bound (`let _ = ...`) and
// passed through `format_args!`, so they type-check, but nothing is printed.
macro_rules! log {
    (target: $target:expr, $lvl:expr, $($arg:tt)+) => {{
        let _ = $target;
        let _ = log!($lvl, $($arg)+);
    }};
    ($lvl:expr, $($arg:tt)+) => {{
        let _ = $lvl;
        let _ = format_args!($($arg)+);
    }};
}
// Each level macro forwards to `log!` with an empty level string.
macro_rules! error {
    (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) };
    ($($arg:tt)+) => { log!("", $($arg)+) };
}
macro_rules! warn {
    (target: $target:expr, $($arg:tt)*) => { log!(target: $target, "", $($arg)*) };
    ($($arg:tt)*) => { log!("", $($arg)*) };
}
macro_rules! info {
    (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) };
    ($($arg:tt)+) => { log!("", $($arg)+) };
}
macro_rules! debug {
    (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) };
    ($($arg:tt)+) => { log!("", $($arg)+) };
}
macro_rules! trace {
    (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) };
    ($($arg:tt)+) => { log!("", $($arg)+) };
}

102
third-party/vendor/bindgen/parse.rs vendored Normal file
View file

@ -0,0 +1,102 @@
//! Common traits and types related to parsing our IR from Clang cursors.
use crate::clang;
use crate::ir::context::{BindgenContext, ItemId, TypeId};
use crate::ir::ty::TypeKind;
/// Not so much an error in the traditional sense, but a control flow message
/// when walking over Clang's AST with a cursor.
#[derive(Debug)]
pub enum ParseError {
    /// Recurse down the current AST node's children.
    Recurse,
    /// Continue on to the next sibling AST node, or back up to the parent's
    /// siblings if we've exhausted all of this node's siblings (and so on).
    Continue,
}
/// The result of parsing a Clang AST node.
#[derive(Debug)]
pub enum ParseResult<T> {
    /// We've already resolved this item before, here is the extant `ItemId` for
    /// it.
    AlreadyResolved(ItemId),

    /// This is a newly parsed item. If the cursor is `Some`, it points to the
    /// AST node where the new `T` was declared.
    New(T, Option<clang::Cursor>),
}
/// An intermediate representation "sub-item" (i.e. one of the types contained
/// inside an `ItemKind` variant) that can be parsed from a Clang cursor.
pub trait ClangSubItemParser: Sized {
    /// Attempt to parse this type from the given cursor.
    ///
    /// The context is passed by mutable reference so that newly parsed items
    /// are held by the context, which also allows implementations to return
    /// already existing items via `ParseResult::AlreadyResolved`.
    fn parse(
        cursor: clang::Cursor,
        context: &mut BindgenContext,
    ) -> Result<ParseResult<Self>, ParseError>;
}
/// An intermediate representation item that can be parsed from a Clang cursor.
pub trait ClangItemParser: Sized {
    /// Parse this item from the given Clang cursor.
    fn parse(
        cursor: clang::Cursor,
        parent: Option<ItemId>,
        context: &mut BindgenContext,
    ) -> Result<ItemId, ParseError>;

    /// Parse this item from the given Clang type.
    fn from_ty(
        ty: &clang::Type,
        location: clang::Cursor,
        parent: Option<ItemId>,
        ctx: &mut BindgenContext,
    ) -> Result<TypeId, ParseError>;

    /// Identical to `from_ty`, but use the given `id` as the `ItemId` for the
    /// newly parsed item.
    fn from_ty_with_id(
        id: ItemId,
        ty: &clang::Type,
        location: clang::Cursor,
        parent: Option<ItemId>,
        ctx: &mut BindgenContext,
    ) -> Result<TypeId, ParseError>;

    /// Parse this item from the given Clang type, or if we haven't resolved all
    /// the other items this one depends on, an unresolved reference.
    fn from_ty_or_ref(
        ty: clang::Type,
        location: clang::Cursor,
        parent_id: Option<ItemId>,
        context: &mut BindgenContext,
    ) -> TypeId;

    /// Identical to `from_ty_or_ref`, but use the given `potential_id` as the
    /// `ItemId` for the newly parsed item.
    fn from_ty_or_ref_with_id(
        potential_id: ItemId,
        ty: clang::Type,
        location: clang::Cursor,
        parent_id: Option<ItemId>,
        context: &mut BindgenContext,
    ) -> TypeId;

    /// Create a named template type.
    fn type_param(
        with_id: Option<ItemId>,
        location: clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Option<TypeId>;

    /// Create a builtin type.
    fn builtin_type(
        kind: TypeKind,
        is_const: bool,
        context: &mut BindgenContext,
    ) -> TypeId;
}

96
third-party/vendor/bindgen/regex_set.rs vendored Normal file
View file

@ -0,0 +1,96 @@
//! A type that represents the union of a set of regular expressions.
use regex::RegexSet as RxSet;
use std::cell::Cell;
/// A dynamic set of regular expressions.
#[derive(Clone, Debug, Default)]
pub struct RegexSet {
    /// The raw patterns, in insertion order.
    items: Vec<String>,
    /// Whether any of the items in the set was ever matched. The length of this
    /// vector is exactly the length of `items`.
    matched: Vec<Cell<bool>>,
    /// The compiled set; `None` until `build` is called (or after an insert
    /// invalidates it).
    set: Option<RxSet>,
    /// Whether we should record matching items in the `matched` vector or not.
    record_matches: bool,
}
impl RegexSet {
/// Is this set empty?
pub fn is_empty(&self) -> bool {
self.items.is_empty()
}
/// Insert a new regex into this set.
pub fn insert<S>(&mut self, string: S)
where
S: AsRef<str>,
{
let string = string.as_ref().to_owned();
if string == "*" {
warn!("using wildcard patterns (`*`) is no longer considered valid. Use `.*` instead");
}
self.items.push(string);
self.matched.push(Cell::new(false));
self.set = None;
}
/// Returns slice of String from its field 'items'
pub fn get_items(&self) -> &[String] {
&self.items[..]
}
/// Returns an iterator over regexes in the set which didn't match any
/// strings yet.
pub fn unmatched_items(&self) -> impl Iterator<Item = &String> {
self.items.iter().enumerate().filter_map(move |(i, item)| {
if !self.record_matches || self.matched[i].get() {
return None;
}
Some(item)
})
}
/// Construct a RegexSet from the set of entries we've accumulated.
///
/// Must be called before calling `matches()`, or it will always return
/// false.
pub fn build(&mut self, record_matches: bool) {
let items = self.items.iter().map(|item| format!("^({})$", item));
self.record_matches = record_matches;
self.set = match RxSet::new(items) {
Ok(x) => Some(x),
Err(e) => {
warn!("Invalid regex in {:?}: {:?}", self.items, e);
None
}
}
}
/// Does the given `string` match any of the regexes in this set?
pub fn matches<S>(&self, string: S) -> bool
where
S: AsRef<str>,
{
let s = string.as_ref();
let set = match self.set {
Some(ref set) => set,
None => return false,
};
if !self.record_matches {
return set.is_match(s);
}
let matches = set.matches(s);
if !matches.matched_any() {
return false;
}
for i in matches.iter() {
self.matched[i].set(true);
}
true
}
}

52
third-party/vendor/bindgen/time.rs vendored Normal file
View file

@ -0,0 +1,52 @@
use std::io::{self, Write};
use std::time::{Duration, Instant};
/// RAII timer to measure how long phases take.
#[derive(Debug)]
pub struct Timer<'a> {
    /// Whether to print the elapsed time to stderr on drop.
    output: bool,
    /// Label printed alongside the elapsed time.
    name: &'a str,
    /// When this timer was started.
    start: Instant,
}
impl<'a> Timer<'a> {
/// Creates a Timer with the given name, and starts it. By default,
/// will print to stderr when it is `drop`'d
pub fn new(name: &'a str) -> Self {
Timer {
output: true,
name,
start: Instant::now(),
}
}
/// Sets whether or not the Timer will print a message
/// when it is dropped.
pub fn with_output(mut self, output: bool) -> Self {
self.output = output;
self
}
/// Returns the time elapsed since the timer's creation
pub fn elapsed(&self) -> Duration {
Instant::now() - self.start
}
fn print_elapsed(&mut self) {
if self.output {
let elapsed = self.elapsed();
let time = (elapsed.as_secs() as f64) * 1e3 +
(elapsed.subsec_nanos() as f64) / 1e6;
let stderr = io::stderr();
// Arbitrary output format, subject to change.
writeln!(stderr.lock(), " time: {:>9.3} ms.\t{}", time, self.name)
.expect("timer write should not fail");
}
}
}
impl<'a> Drop for Timer<'a> {
    /// Prints the elapsed time (when output is enabled) as the timer goes out
    /// of scope.
    fn drop(&mut self) {
        self.print_elapsed();
    }
}