Vendor things

This commit is contained in:
John Doty 2024-03-08 11:03:01 -08:00
parent 5deceec006
commit 977e3c17e5
19434 changed files with 10682014 additions and 0 deletions

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,23 @@
// Backend selection: exactly one of the two modules below is compiled in,
// and both expose a `Context` type with the same interface, so the rest of
// the crate can use `Context` without caring which backend was chosen.

// On wasm32 (excluding Emscripten and builds with the "webgl" feature), use
// the browser-facing backend from `web.rs`.
#[cfg(all(
    target_arch = "wasm32",
    not(any(target_os = "emscripten", feature = "webgl"))
))]
mod web;
#[cfg(all(
    target_arch = "wasm32",
    not(any(target_os = "emscripten", feature = "webgl"))
))]
pub(crate) use web::Context;

// Everywhere else (native targets, Emscripten, or wasm32 with "webgl"), use
// the direct backend. This cfg is the exact negation of the one above, so
// exactly one `Context` is always defined.
#[cfg(any(
    not(target_arch = "wasm32"),
    target_os = "emscripten",
    feature = "webgl"
))]
mod direct;
#[cfg(any(
    not(target_arch = "wasm32"),
    target_os = "emscripten",
    feature = "webgl"
))]
pub(crate) use direct::Context;

3318
third-party/vendor/wgpu/src/backend/web.rs vendored Normal file

File diff suppressed because it is too large Load diff

4021
third-party/vendor/wgpu/src/context.rs vendored Normal file

File diff suppressed because it is too large Load diff

5069
third-party/vendor/wgpu/src/lib.rs vendored Normal file

File diff suppressed because it is too large Load diff

93
third-party/vendor/wgpu/src/macros.rs vendored Normal file
View file

@ -0,0 +1,93 @@
//! Convenience macros

/// Macro to produce an array of [`VertexAttribute`](crate::VertexAttribute).
///
/// Output has type: `[VertexAttribute; _]`. Usage is as follows:
/// ```
/// # use wgpu::vertex_attr_array;
/// let attrs = vertex_attr_array![0 => Float32x2, 1 => Float32, 2 => Uint16x4];
/// ```
/// This example specifies a list of three [`VertexAttribute`](crate::VertexAttribute),
/// each with the given `shader_location` and `format`.
/// Offsets are calculated automatically.
#[macro_export]
macro_rules! vertex_attr_array {
    // Entry point: seed the accumulator with an empty array and offset 0,
    // then recurse over the `location => format` pairs.
    ($($loc:expr => $fmt:ident),* $(,)?) => {
        $crate::vertex_attr_array!([] ; 0; $($loc => $fmt ,)*)
    };
    // Base case: no pairs left — emit the accumulated attribute array.
    ([$($t:expr,)*] ; $off:expr ;) => { [$($t,)*] };
    // Recursive case: append one `VertexAttribute` at the running offset,
    // then recurse with the offset advanced by the size of this format.
    ([$($t:expr,)*] ; $off:expr ; $loc:expr => $item:ident, $($ll:expr => $ii:ident ,)*) => {
        $crate::vertex_attr_array!(
            [$($t,)*
            $crate::VertexAttribute {
                format: $crate::VertexFormat :: $item,
                offset: $off,
                shader_location: $loc,
            },];
            $off + $crate::VertexFormat :: $item.size();
            $($ll => $ii ,)*
        )
    };
}
/// Checks that `vertex_attr_array!` assigns shader locations verbatim and
/// computes offsets cumulatively from the formats' sizes.
#[test]
fn test_vertex_attr_array() {
    let attrs = vertex_attr_array![0 => Float32x2, 3 => Uint16x4];
    // VertexAttribute does not support PartialEq, so we cannot test directly
    assert_eq!(attrs.len(), 2);
    assert_eq!(attrs[0].offset, 0);
    assert_eq!(attrs[0].shader_location, 0);
    // The second attribute starts right after the first: a Float32x2 occupies
    // the size of two f32s (8 bytes).
    assert_eq!(attrs[1].offset, std::mem::size_of::<(f32, f32)>() as u64);
    assert_eq!(attrs[1].shader_location, 3);
}
/// Macro to load a SPIR-V module statically.
///
/// It ensures the word alignment as well as the magic number.
///
/// Return type: [`crate::ShaderModuleDescriptor`]
#[macro_export]
#[cfg(feature = "spirv")]
macro_rules! include_spirv {
    ($($token:tt)*) => {
        {
            //log::info!("including '{}'", $($token)*);
            $crate::ShaderModuleDescriptor {
                // The path literal doubles as the descriptor's debug label.
                label: Some($($token)*),
                // `make_spirv` validates the length, alignment, and magic number.
                source: $crate::util::make_spirv(include_bytes!($($token)*)),
            }
        }
    };
}
/// Macro to load raw SPIR-V data statically, for use with [`Features::SPIRV_SHADER_PASSTHROUGH`].
///
/// It ensures the word alignment as well as the magic number.
///
/// [`Features::SPIRV_SHADER_PASSTHROUGH`]: crate::Features::SPIRV_SHADER_PASSTHROUGH
#[macro_export]
macro_rules! include_spirv_raw {
    ($($token:tt)*) => {
        {
            //log::info!("including '{}'", $($token)*);
            $crate::ShaderModuleDescriptorSpirV {
                // The path literal doubles as the descriptor's debug label.
                label: Some($($token)*),
                // `make_spirv_raw` yields the raw word slice, bypassing the
                // `ShaderSource` wrapper used by `include_spirv!`.
                source: $crate::util::make_spirv_raw(include_bytes!($($token)*)),
            }
        }
    };
}
/// Macro to load a WGSL module statically.
///
/// The shader text is embedded at compile time via `include_str!`; the given
/// path literal is also used as the module's debug label.
#[macro_export]
macro_rules! include_wgsl {
    ($($token:tt)*) => {
        {
            //log::info!("including '{}'", $($token)*);
            $crate::ShaderModuleDescriptor {
                label: Some($($token)*),
                source: $crate::ShaderSource::Wgsl(include_str!($($token)*).into()),
            }
        }
    };
}

183
third-party/vendor/wgpu/src/util/belt.rs vendored Normal file
View file

@ -0,0 +1,183 @@
use crate::{
util::align_to, Buffer, BufferAddress, BufferDescriptor, BufferSize, BufferUsages,
BufferViewMut, CommandEncoder, Device, MapMode,
};
use std::fmt;
use std::sync::{mpsc, Arc};
/// One staging buffer plus a bump-allocation cursor into it.
struct Chunk {
    /// The mappable staging buffer backing this chunk.
    buffer: Arc<Buffer>,
    /// Total capacity of `buffer`, in bytes.
    size: BufferAddress,
    /// Bytes already handed out; the next suballocation starts here.
    offset: BufferAddress,
}
/// Efficiently performs many buffer writes by sharing and reusing temporary buffers.
///
/// Internally it uses a ring-buffer of staging buffers that are sub-allocated.
/// It has an advantage over [`Queue::write_buffer()`] in a way that it returns a mutable slice,
/// which you can fill to avoid an extra data copy.
///
/// Using a staging belt is slightly complicated, and generally goes as follows:
/// 1. Write to buffers that need writing to using [`StagingBelt::write_buffer()`].
/// 2. Call [`StagingBelt::finish()`].
/// 3. Submit all command encoders that were used in step 1.
/// 4. Call [`StagingBelt::recall()`].
///
/// [`Queue::write_buffer()`]: crate::Queue::write_buffer
pub struct StagingBelt {
    /// Size, in bytes, used for newly allocated chunks (a single oversized
    /// write may still allocate a larger chunk; see `write_buffer`).
    chunk_size: BufferAddress,
    /// Chunks into which we are accumulating data to be transferred.
    active_chunks: Vec<Chunk>,
    /// Chunks that have scheduled transfers already; they are unmapped and some
    /// command encoder has one or more `copy_buffer_to_buffer` commands with them
    /// as source.
    closed_chunks: Vec<Chunk>,
    /// Chunks that are back from the GPU and ready to be mapped for write and put
    /// into `active_chunks`.
    free_chunks: Vec<Chunk>,
    /// When closed chunks are mapped again, the map callback sends them here.
    sender: mpsc::Sender<Chunk>,
    /// Free chunks are received here to be put on `self.free_chunks`.
    receiver: mpsc::Receiver<Chunk>,
}
impl StagingBelt {
    /// Create a new staging belt.
    ///
    /// The `chunk_size` is the unit of internal buffer allocation; writes will be
    /// sub-allocated within each chunk. Therefore, for optimal use of memory, the
    /// chunk size should be:
    ///
    /// * larger than the largest single [`StagingBelt::write_buffer()`] operation;
    /// * 1-4 times less than the total amount of data uploaded per submission
    ///   (per [`StagingBelt::finish()`]); and
    /// * bigger is better, within these bounds.
    pub fn new(chunk_size: BufferAddress) -> Self {
        // The channel carries re-mapped chunks from `map_async` callbacks
        // (scheduled in `recall`) back to `receive_chunks`.
        let (sender, receiver) = mpsc::channel();
        StagingBelt {
            chunk_size,
            active_chunks: Vec::new(),
            closed_chunks: Vec::new(),
            free_chunks: Vec::new(),
            sender,
            receiver,
        }
    }
    /// Allocate the staging belt slice of `size` to be uploaded into the `target` buffer
    /// at the specified offset.
    ///
    /// The upload will be placed into the provided command encoder. This encoder
    /// must be submitted after [`StagingBelt::finish()`] is called and before
    /// [`StagingBelt::recall()`] is called.
    ///
    /// If the `size` is greater than the size of any free internal buffer, a new buffer
    /// will be allocated for it. Therefore, the `chunk_size` passed to [`StagingBelt::new()`]
    /// should ideally be larger than every such size.
    pub fn write_buffer(
        &mut self,
        encoder: &mut CommandEncoder,
        target: &Buffer,
        offset: BufferAddress,
        size: BufferSize,
        device: &Device,
    ) -> BufferViewMut {
        // Pick a chunk in order of preference: an active chunk with enough
        // space left, then a free (already re-mapped) chunk that fits, and
        // finally a brand-new allocation.
        let mut chunk = if let Some(index) = self
            .active_chunks
            .iter()
            .position(|chunk| chunk.offset + size.get() <= chunk.size)
        {
            self.active_chunks.swap_remove(index)
        } else {
            self.receive_chunks(); // ensure self.free_chunks is up to date
            if let Some(index) = self
                .free_chunks
                .iter()
                .position(|chunk| size.get() <= chunk.size)
            {
                self.free_chunks.swap_remove(index)
            } else {
                // New chunks are at least `chunk_size`, but grow to fit a
                // single oversized write.
                let size = self.chunk_size.max(size.get());
                Chunk {
                    buffer: Arc::new(device.create_buffer(&BufferDescriptor {
                        label: Some("(wgpu internal) StagingBelt staging buffer"),
                        size,
                        usage: BufferUsages::MAP_WRITE | BufferUsages::COPY_SRC,
                        mapped_at_creation: true,
                    })),
                    size,
                    offset: 0,
                }
            }
        };
        // Record the GPU-side copy from the staging chunk into the target.
        encoder.copy_buffer_to_buffer(&chunk.buffer, chunk.offset, target, offset, size.get());
        let old_offset = chunk.offset;
        // Advance the cursor, keeping the next suballocation map-aligned.
        chunk.offset = align_to(chunk.offset + size.get(), crate::MAP_ALIGNMENT);
        self.active_chunks.push(chunk);
        // Hand the caller a mapped, writable view of the bytes just reserved.
        self.active_chunks
            .last()
            .unwrap()
            .buffer
            .slice(old_offset..old_offset + size.get())
            .get_mapped_range_mut()
    }
    /// Prepare currently mapped buffers for use in a submission.
    ///
    /// This must be called before the command encoder(s) provided to
    /// [`StagingBelt::write_buffer()`] are submitted.
    ///
    /// At this point, all the partially used staging buffers are closed (cannot be used for
    /// further writes) until after [`StagingBelt::recall()`] is called *and* the GPU is done
    /// copying the data from them.
    pub fn finish(&mut self) {
        for chunk in self.active_chunks.drain(..) {
            // Unmapping is required before the queued copies can execute.
            chunk.buffer.unmap();
            self.closed_chunks.push(chunk);
        }
    }
    /// Recall all of the closed buffers back to be reused.
    ///
    /// This must only be called after the command encoder(s) provided to
    /// [`StagingBelt::write_buffer()`] are submitted. Additional calls are harmless.
    /// Not calling this as soon as possible may result in increased buffer memory usage.
    pub fn recall(&mut self) {
        self.receive_chunks();
        let sender = &self.sender;
        for chunk in self.closed_chunks.drain(..) {
            let sender = sender.clone();
            // Once the GPU finishes with the chunk and it is mapped again,
            // the callback sends it back through the channel; it is drained
            // later by `receive_chunks`.
            chunk
                .buffer
                .clone()
                .slice(..)
                .map_async(MapMode::Write, move |_| {
                    let _ = sender.send(chunk);
                });
        }
    }
    /// Move all chunks that the GPU is done with (and are now mapped again)
    /// from `self.receiver` to `self.free_chunks`.
    fn receive_chunks(&mut self) {
        while let Ok(mut chunk) = self.receiver.try_recv() {
            // Reset the cursor so the chunk can be suballocated from scratch.
            chunk.offset = 0;
            self.free_chunks.push(chunk);
        }
    }
}
impl fmt::Debug for StagingBelt {
    /// Summarizes the belt by chunk counts rather than dumping buffer contents.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut state = f.debug_struct("StagingBelt");
        state.field("chunk_size", &self.chunk_size);
        state.field("active_chunks", &self.active_chunks.len());
        state.field("closed_chunks", &self.closed_chunks.len());
        state.field("free_chunks", &self.free_chunks.len());
        // `finish_non_exhaustive` prints a trailing `..` to signal that the
        // channel fields are intentionally omitted.
        state.finish_non_exhaustive()
    }
}

View file

@ -0,0 +1,145 @@
/// Describes a [Buffer](crate::Buffer) when allocating.
///
/// Used with [`DeviceExt::create_buffer_init`] to create a buffer whose
/// initial contents are copied from `contents`.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct BufferInitDescriptor<'a> {
    /// Debug label of a buffer. This will show up in graphics debuggers for easy identification.
    pub label: crate::Label<'a>,
    /// Contents of a buffer on creation.
    pub contents: &'a [u8],
    /// Usages of a buffer. If the buffer is used in any way that isn't specified here, the operation
    /// will panic.
    pub usage: crate::BufferUsages,
}
/// Utility methods not meant to be in the main API.
pub trait DeviceExt {
    /// Creates a [Buffer](crate::Buffer) with data to initialize it.
    ///
    /// The buffer's size is rounded up to a multiple of `COPY_BUFFER_ALIGNMENT`,
    /// so it may be slightly larger than `contents`.
    fn create_buffer_init(&self, desc: &BufferInitDescriptor) -> crate::Buffer;
    /// Upload an entire texture and its mipmaps from a source buffer.
    ///
    /// Expects all mipmaps to be tightly packed in the data buffer.
    ///
    /// If the texture is a 2DArray texture, uploads each layer in order, expecting
    /// each layer and its mips to be tightly packed.
    ///
    /// Example:
    /// Layer0Mip0 Layer0Mip1 Layer0Mip2 ... Layer1Mip0 Layer1Mip1 Layer1Mip2 ...
    ///
    /// Implicitly adds the `COPY_DST` usage if it is not present in the descriptor,
    /// as it is required to be able to upload the data to the gpu.
    fn create_texture_with_data(
        &self,
        queue: &crate::Queue,
        desc: &crate::TextureDescriptor,
        data: &[u8],
    ) -> crate::Texture;
}
impl DeviceExt for crate::Device {
    fn create_buffer_init(&self, descriptor: &BufferInitDescriptor<'_>) -> crate::Buffer {
        // Skip mapping if the buffer is zero sized
        if descriptor.contents.is_empty() {
            let wgt_descriptor = crate::BufferDescriptor {
                label: descriptor.label,
                size: 0,
                usage: descriptor.usage,
                mapped_at_creation: false,
            };
            self.create_buffer(&wgt_descriptor)
        } else {
            let unpadded_size = descriptor.contents.len() as crate::BufferAddress;
            // Valid vulkan usage is
            // 1. buffer size must be a multiple of COPY_BUFFER_ALIGNMENT.
            // 2. buffer size must be greater than 0.
            // Therefore we round the value up to the nearest multiple, and ensure it's at least COPY_BUFFER_ALIGNMENT.
            let align_mask = crate::COPY_BUFFER_ALIGNMENT - 1;
            let padded_size =
                ((unpadded_size + align_mask) & !align_mask).max(crate::COPY_BUFFER_ALIGNMENT);
            let wgt_descriptor = crate::BufferDescriptor {
                label: descriptor.label,
                size: padded_size,
                usage: descriptor.usage,
                mapped_at_creation: true,
            };
            let buffer = self.create_buffer(&wgt_descriptor);
            // Copy the caller's bytes into the mapped region; any padding
            // bytes past `unpadded_size` are left as created.
            buffer.slice(..).get_mapped_range_mut()[..unpadded_size as usize]
                .copy_from_slice(descriptor.contents);
            buffer.unmap();
            buffer
        }
    }
    fn create_texture_with_data(
        &self,
        queue: &crate::Queue,
        desc: &crate::TextureDescriptor,
        data: &[u8],
    ) -> crate::Texture {
        // Implicitly add the COPY_DST usage
        let mut desc = desc.to_owned();
        desc.usage |= crate::TextureUsages::COPY_DST;
        let texture = self.create_texture(&desc);
        // Will return None only if it's a combined depth-stencil format
        // If so, default to 4, validation will fail later anyway since the depth or stencil
        // aspect needs to be written to individually
        let block_size = desc.format.block_size(None).unwrap_or(4);
        let (block_width, block_height) = desc.format.block_dimensions();
        let layer_iterations = desc.array_layer_count();
        // `data` is consumed sequentially: all mips of layer 0, then all mips
        // of layer 1, and so on; `binary_offset` tracks the read position.
        let mut binary_offset = 0;
        for layer in 0..layer_iterations {
            for mip in 0..desc.mip_level_count {
                let mut mip_size = desc.mip_level_size(mip).unwrap();
                // copying layers separately
                if desc.dimension != wgt::TextureDimension::D3 {
                    mip_size.depth_or_array_layers = 1;
                }
                // When uploading mips of compressed textures and the mip is supposed to be
                // a size that isn't a multiple of the block size, the mip needs to be uploaded
                // as its "physical size" which is the size rounded up to the nearest block size.
                let mip_physical = mip_size.physical_size(desc.format);
                // All these calculations are performed on the physical size as that's the
                // data that exists in the buffer.
                let width_blocks = mip_physical.width / block_width;
                let height_blocks = mip_physical.height / block_height;
                let bytes_per_row = width_blocks * block_size;
                let data_size = bytes_per_row * height_blocks * mip_size.depth_or_array_layers;
                let end_offset = binary_offset + data_size as usize;
                queue.write_texture(
                    crate::ImageCopyTexture {
                        texture: &texture,
                        mip_level: mip,
                        origin: crate::Origin3d {
                            x: 0,
                            y: 0,
                            z: layer,
                        },
                        aspect: wgt::TextureAspect::All,
                    },
                    &data[binary_offset..end_offset],
                    crate::ImageDataLayout {
                        offset: 0,
                        bytes_per_row: Some(bytes_per_row),
                        rows_per_image: Some(height_blocks),
                    },
                    mip_physical,
                );
                binary_offset = end_offset;
            }
        }
        texture
    }
}

View file

@ -0,0 +1,202 @@
use std::ops::Range;
use wgt::{BufferAddress, DynamicOffset, IndexFormat};
use crate::{BindGroup, Buffer, BufferSlice, RenderBundleEncoder, RenderPass, RenderPipeline};
/// Methods shared by [`RenderPass`] and [`RenderBundleEncoder`].
///
/// Each method mirrors the identically-named inherent method on the two
/// implementing types, allowing code to be generic over either encoder.
pub trait RenderEncoder<'a> {
    /// Sets the active bind group for a given bind group index. The bind group layout
    /// in the active pipeline when any `draw()` function is called must match the layout of this bind group.
    ///
    /// If the bind group have dynamic offsets, provide them in order of their declaration.
    fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]);
    /// Sets the active render pipeline.
    ///
    /// Subsequent draw calls will exhibit the behavior defined by `pipeline`.
    fn set_pipeline(&mut self, pipeline: &'a RenderPipeline);
    /// Sets the active index buffer.
    ///
    /// Subsequent calls to [`draw_indexed`](RenderEncoder::draw_indexed) on this [`RenderEncoder`] will
    /// use `buffer` as the source index buffer.
    fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat);
    /// Assign a vertex buffer to a slot.
    ///
    /// Subsequent calls to [`draw`] and [`draw_indexed`] on this
    /// [`RenderEncoder`] will use `buffer` as one of the source vertex buffers.
    ///
    /// The `slot` refers to the index of the matching descriptor in
    /// [VertexState::buffers](crate::VertexState::buffers).
    ///
    /// [`draw`]: RenderEncoder::draw
    /// [`draw_indexed`]: RenderEncoder::draw_indexed
    fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>);
    /// Draws primitives from the active vertex buffer(s).
    ///
    /// The active vertex buffers can be set with [`RenderEncoder::set_vertex_buffer`].
    fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>);
    /// Draws indexed primitives using the active index buffer and the active vertex buffers.
    ///
    /// The active index buffer can be set with [`RenderEncoder::set_index_buffer`], while the active
    /// vertex buffers can be set with [`RenderEncoder::set_vertex_buffer`].
    fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>);
    /// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
    ///
    /// The active vertex buffers can be set with [`RenderEncoder::set_vertex_buffer`].
    ///
    /// The structure expected in `indirect_buffer` must conform to [`DrawIndirect`](crate::util::DrawIndirect).
    fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress);
    /// Draws indexed primitives using the active index buffer and the active vertex buffers,
    /// based on the contents of the `indirect_buffer`.
    ///
    /// The active index buffer can be set with [`RenderEncoder::set_index_buffer`], while the active
    /// vertex buffers can be set with [`RenderEncoder::set_vertex_buffer`].
    ///
    /// The structure expected in `indirect_buffer` must conform to [`DrawIndexedIndirect`](crate::util::DrawIndexedIndirect).
    fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
    );
    /// [`wgt::Features::PUSH_CONSTANTS`] must be enabled on the device in order to call this function.
    ///
    /// Set push constant data.
    ///
    /// Offset is measured in bytes, but must be a multiple of [`wgt::PUSH_CONSTANT_ALIGNMENT`].
    ///
    /// Data size must be a multiple of 4 and must be aligned to the 4s, so we take an array of u32.
    /// For example, with an offset of 4 and an array of `[u32; 3]`, that will write to the range
    /// of 4..16.
    ///
    /// For each byte in the range of push constant data written, the union of the stages of all push constant
    /// ranges that covers that byte must be exactly `stages`. There's no good way of explaining this simply,
    /// so here are some examples:
    ///
    /// ```text
    /// For the given ranges:
    /// - 0..4 Vertex
    /// - 4..8 Fragment
    /// ```
    ///
    /// You would need to upload this in two set_push_constants calls. First for the `Vertex` range, second for the `Fragment` range.
    ///
    /// ```text
    /// For the given ranges:
    /// - 0..8 Vertex
    /// - 4..12 Fragment
    /// ```
    ///
    /// You would need to upload this in three set_push_constants calls. First for the `Vertex` only range 0..4, second
    /// for the `Vertex | Fragment` range 4..8, third for the `Fragment` range 8..12.
    fn set_push_constants(&mut self, stages: wgt::ShaderStages, offset: u32, data: &[u8]);
}
impl<'a> RenderEncoder<'a> for RenderPass<'a> {
#[inline(always)]
fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]) {
Self::set_bind_group(self, index, bind_group, offsets);
}
#[inline(always)]
fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
Self::set_pipeline(self, pipeline);
}
#[inline(always)]
fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
Self::set_index_buffer(self, buffer_slice, index_format);
}
#[inline(always)]
fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
Self::set_vertex_buffer(self, slot, buffer_slice);
}
#[inline(always)]
fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
Self::draw(self, vertices, instances);
}
#[inline(always)]
fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
Self::draw_indexed(self, indices, base_vertex, instances);
}
#[inline(always)]
fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
Self::draw_indirect(self, indirect_buffer, indirect_offset);
}
#[inline(always)]
fn draw_indexed_indirect(
&mut self,
indirect_buffer: &'a Buffer,
indirect_offset: BufferAddress,
) {
Self::draw_indexed_indirect(self, indirect_buffer, indirect_offset);
}
#[inline(always)]
fn set_push_constants(&mut self, stages: wgt::ShaderStages, offset: u32, data: &[u8]) {
Self::set_push_constants(self, stages, offset, data);
}
}
impl<'a> RenderEncoder<'a> for RenderBundleEncoder<'a> {
#[inline(always)]
fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]) {
Self::set_bind_group(self, index, bind_group, offsets);
}
#[inline(always)]
fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
Self::set_pipeline(self, pipeline);
}
#[inline(always)]
fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
Self::set_index_buffer(self, buffer_slice, index_format);
}
#[inline(always)]
fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
Self::set_vertex_buffer(self, slot, buffer_slice);
}
#[inline(always)]
fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
Self::draw(self, vertices, instances);
}
#[inline(always)]
fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
Self::draw_indexed(self, indices, base_vertex, instances);
}
#[inline(always)]
fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
Self::draw_indirect(self, indirect_buffer, indirect_offset);
}
#[inline(always)]
fn draw_indexed_indirect(
&mut self,
indirect_buffer: &'a Buffer,
indirect_offset: BufferAddress,
) {
Self::draw_indexed_indirect(self, indirect_buffer, indirect_offset);
}
#[inline(always)]
fn set_push_constants(&mut self, stages: wgt::ShaderStages, offset: u32, data: &[u8]) {
Self::set_push_constants(self, stages, offset, data);
}
}

View file

@ -0,0 +1,81 @@
/// The structure expected in `indirect_buffer` for [`RenderEncoder::draw_indirect`](crate::util::RenderEncoder::draw_indirect).
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
pub struct DrawIndirect {
    /// The number of vertices to draw.
    pub vertex_count: u32,
    /// The number of instances to draw.
    pub instance_count: u32,
    /// The Index of the first vertex to draw.
    pub base_vertex: u32,
    /// The instance ID of the first instance to draw.
    /// Has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`](crate::Features::INDIRECT_FIRST_INSTANCE) is enabled.
    pub base_instance: u32,
}
impl DrawIndirect {
    /// Returns the bytes representation of the struct, ready to be written in a [`Buffer`](crate::Buffer).
    pub fn as_bytes(&self) -> &[u8] {
        // SAFETY: `Self` is `#[repr(C)]` and consists solely of 4-byte
        // integer fields, so it has no padding — every byte of `*self` is
        // initialized. The slice borrows `self`, so it cannot outlive the
        // value it points into. (The `from_raw_parts` call already yields
        // `&[u8]`; no extra `transmute` is needed.)
        unsafe {
            std::slice::from_raw_parts(self as *const _ as *const u8, std::mem::size_of::<Self>())
        }
    }
}
/// The structure expected in `indirect_buffer` for [`RenderEncoder::draw_indexed_indirect`](crate::util::RenderEncoder::draw_indexed_indirect).
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
pub struct DrawIndexedIndirect {
    /// The number of vertices to draw.
    pub vertex_count: u32,
    /// The number of instances to draw.
    pub instance_count: u32,
    /// The base index within the index buffer.
    pub base_index: u32,
    /// The value added to the vertex index before indexing into the vertex buffer.
    pub vertex_offset: i32,
    /// The instance ID of the first instance to draw.
    /// Has to be 0, unless [`Features::INDIRECT_FIRST_INSTANCE`](crate::Features::INDIRECT_FIRST_INSTANCE) is enabled.
    pub base_instance: u32,
}
impl DrawIndexedIndirect {
    /// Returns the bytes representation of the struct, ready to be written in a [`Buffer`](crate::Buffer).
    pub fn as_bytes(&self) -> &[u8] {
        // SAFETY: `Self` is `#[repr(C)]` and consists solely of 4-byte
        // integer fields, so it has no padding — every byte of `*self` is
        // initialized. The slice borrows `self`, so it cannot outlive the
        // value it points into. (The `from_raw_parts` call already yields
        // `&[u8]`; no extra `transmute` is needed.)
        unsafe {
            std::slice::from_raw_parts(self as *const _ as *const u8, std::mem::size_of::<Self>())
        }
    }
}
/// The structure expected in `indirect_buffer` for [`ComputePass::dispatch_workgroups_indirect`](crate::ComputePass::dispatch_workgroups_indirect).
///
/// x, y and z denote the number of work groups to dispatch in each dimension.
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
pub struct DispatchIndirect {
    /// The number of work groups in X dimension.
    pub x: u32,
    /// The number of work groups in Y dimension.
    pub y: u32,
    /// The number of work groups in Z dimension.
    pub z: u32,
}
impl DispatchIndirect {
    /// Returns the bytes representation of the struct, ready to be written in a [`Buffer`](crate::Buffer).
    pub fn as_bytes(&self) -> &[u8] {
        // SAFETY: `Self` is `#[repr(C)]` and consists solely of `u32` fields,
        // so it has no padding — every byte of `*self` is initialized. The
        // slice borrows `self`, so it cannot outlive the value it points
        // into. (The `from_raw_parts` call already yields `&[u8]`; no extra
        // `transmute` is needed.)
        unsafe {
            std::slice::from_raw_parts(self as *const _ as *const u8, std::mem::size_of::<Self>())
        }
    }
}

116
third-party/vendor/wgpu/src/util/init.rs vendored Normal file
View file

@ -0,0 +1,116 @@
use wgt::{Backends, PowerPreference, RequestAdapterOptions};
use crate::{Adapter, Instance, Surface};
#[cfg(any(not(target_arch = "wasm32"), feature = "wgc"))]
pub use wgc::instance::parse_backends_from_comma_list;
/// Always returns WEBGPU on wasm over webgpu.
///
/// On this configuration (wasm32 without the `wgc` feature) the browser's
/// WebGPU backend is the only option, so the input list is ignored.
#[cfg(all(target_arch = "wasm32", not(feature = "wgc")))]
pub fn parse_backends_from_comma_list(_string: &str) -> Backends {
    Backends::BROWSER_WEBGPU
}
/// Get a set of backend bits from the environment variable WGPU_BACKEND.
pub fn backend_bits_from_env() -> Option<Backends> {
std::env::var("WGPU_BACKEND")
.as_deref()
.map(str::to_lowercase)
.ok()
.as_deref()
.map(parse_backends_from_comma_list)
}
/// Get a power preference from the environment variable WGPU_POWER_PREF
///
/// Recognized (lowercased) values are `low` and `high`; anything else —
/// including an unset variable — yields `None`.
pub fn power_preference_from_env() -> Option<PowerPreference> {
    let value = std::env::var("WGPU_POWER_PREF").ok()?.to_lowercase();
    match value.as_str() {
        "low" => Some(PowerPreference::LowPower),
        "high" => Some(PowerPreference::HighPerformance),
        _ => None,
    }
}
/// Initialize the adapter obeying the WGPU_ADAPTER_NAME environment variable.
#[cfg(not(target_arch = "wasm32"))]
pub fn initialize_adapter_from_env(
    instance: &Instance,
    compatible_surface: Option<&Surface>,
) -> Option<Adapter> {
    // Bail out with `None` when the variable is unset (or not valid UTF-8).
    let desired_adapter_name = std::env::var("WGPU_ADAPTER_NAME").ok()?.to_lowercase();
    // Take the first adapter whose (lowercased) name contains the requested
    // substring, skipping adapters that cannot present to the given surface.
    let matching = instance
        .enumerate_adapters(Backends::all())
        .into_iter()
        .find(|adapter| {
            let info = adapter.get_info();
            if let Some(surface) = compatible_surface {
                if !adapter.is_surface_supported(surface) {
                    return false;
                }
            }
            info.name.to_lowercase().contains(&desired_adapter_name)
        });
    // An explicitly requested adapter that cannot be found is a hard error.
    Some(matching.expect("WGPU_ADAPTER_NAME set but no matching adapter found!"))
}
/// Initialize the adapter obeying the WGPU_ADAPTER_NAME environment variable.
///
/// On wasm32 adapters cannot be enumerated, so this always returns `None`;
/// callers (see `initialize_adapter_from_env_or_default`) then fall back to
/// requesting a default adapter.
#[cfg(target_arch = "wasm32")]
pub fn initialize_adapter_from_env(
    _instance: &Instance,
    _compatible_surface: Option<&Surface>,
) -> Option<Adapter> {
    None
}
/// Initialize the adapter obeying the WGPU_ADAPTER_NAME environment variable and if it doesn't exist fall back on a default adapter.
pub async fn initialize_adapter_from_env_or_default(
    instance: &Instance,
    compatible_surface: Option<&Surface>,
) -> Option<Adapter> {
    // Prefer an adapter explicitly selected through the environment.
    if let Some(adapter) = initialize_adapter_from_env(instance, compatible_surface) {
        return Some(adapter);
    }
    // Otherwise ask the instance for a default adapter, honoring
    // WGPU_POWER_PREF when it is set.
    instance
        .request_adapter(&RequestAdapterOptions {
            power_preference: power_preference_from_env().unwrap_or_default(),
            force_fallback_adapter: false,
            compatible_surface,
        })
        .await
}
/// Choose which DX12 shader compiler to use from the environment variable `WGPU_DX12_COMPILER`.
///
/// Possible values are `dxc` and `fxc`. Case insensitive.
pub fn dx12_shader_compiler_from_env() -> Option<wgt::Dx12Compiler> {
    let value = std::env::var("WGPU_DX12_COMPILER").ok()?.to_lowercase();
    match value.as_str() {
        // DXC with its libraries looked up at their default locations.
        "dxc" => Some(wgt::Dx12Compiler::Dxc {
            dxil_path: None,
            dxc_path: None,
        }),
        "fxc" => Some(wgt::Dx12Compiler::Fxc),
        // Unset variable or unrecognized value: let the caller pick a default.
        _ => None,
    }
}

143
third-party/vendor/wgpu/src/util/mod.rs vendored Normal file
View file

@ -0,0 +1,143 @@
//! Utility structures and functions that are built on top of the main `wgpu` API.
//!
//! Nothing in this module is a part of the WebGPU API specification;
//! they are unique to the `wgpu` library.
mod belt;
mod device;
mod encoder;
mod indirect;
mod init;
use std::sync::Arc;
use std::{
borrow::Cow,
mem::{align_of, size_of},
ptr::copy_nonoverlapping,
};
pub use belt::StagingBelt;
pub use device::{BufferInitDescriptor, DeviceExt};
pub use encoder::RenderEncoder;
pub use indirect::*;
pub use init::*;
pub use wgt::math::*;
/// Treat the given byte slice as a SPIR-V module.
///
/// # Panic
///
/// This function panics if:
///
/// - Input length isn't multiple of 4
/// - Input is longer than [`usize::max_value`]
/// - Input is empty
/// - SPIR-V magic number is missing from beginning of stream
#[cfg(feature = "spirv")]
pub fn make_spirv(data: &[u8]) -> super::ShaderSource {
    // Validation and word conversion are delegated to `make_spirv_raw`.
    super::ShaderSource::SpirV(make_spirv_raw(data))
}
/// Version of make_spirv intended for use with [`Device::create_shader_module_spirv`].
/// Returns raw slice instead of ShaderSource.
///
/// Accepts the module in either endianness: if the magic number is stored
/// byte-swapped relative to the host, every word is swapped into native order.
///
/// # Panics
///
/// Panics if the input is empty, its length is not a multiple of 4, or it does
/// not begin with the SPIR-V magic number (in either byte order).
///
/// [`Device::create_shader_module_spirv`]: crate::Device::create_shader_module_spirv
pub fn make_spirv_raw(data: &[u8]) -> Cow<[u32]> {
    const MAGIC_NUMBER: u32 = 0x0723_0203;
    assert_eq!(
        data.len() % size_of::<u32>(),
        0,
        "data size is not a multiple of 4"
    );
    assert_ne!(data.len(), 0, "data size must be larger than zero");
    // If the data happens to be aligned, directly use the byte array,
    // otherwise copy it into freshly allocated (and therefore aligned) words.
    let mut words = if data.as_ptr().align_offset(align_of::<u32>()) == 0 {
        // SAFETY: the pointer is `u32`-aligned (checked just above) and the
        // length is a multiple of 4, so `align_to` reinterprets the entire
        // slice with empty prefix and suffix.
        let (pre, words, post) = unsafe { data.align_to::<u32>() };
        debug_assert!(pre.is_empty());
        debug_assert!(post.is_empty());
        Cow::from(words)
    } else {
        // Safe fallback: assemble each word from its 4 bytes in native order
        // (equivalent to a byte-wise copy into an aligned buffer).
        let words: Vec<u32> = data
            .chunks_exact(size_of::<u32>())
            .map(|chunk| u32::from_ne_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]))
            .collect();
        Cow::from(words)
    };
    // Before checking if the data starts with the magic, check if it starts
    // with the magic in non-native endianness, own & swap the data if so.
    if words[0] == MAGIC_NUMBER.swap_bytes() {
        for word in Cow::to_mut(&mut words) {
            *word = word.swap_bytes();
        }
    }
    assert_eq!(
        words[0], MAGIC_NUMBER,
        "wrong magic word {:x}. Make sure you are using a binary SPIRV file.",
        words[0]
    );
    words
}
/// CPU accessible buffer used to download data back from the GPU.
pub struct DownloadBuffer(
    /// Keeps the mapped staging buffer alive while the view below borrows it.
    Arc<super::Buffer>,
    /// The mapped byte range exposed through `Deref<Target = [u8]>`.
    Box<dyn crate::context::BufferMappedRange>,
);
impl DownloadBuffer {
    /// Asynchronously read the contents of a buffer.
    ///
    /// Copies `buffer` into a freshly created `COPY_DST | MAP_READ` staging
    /// buffer, submits the copy on `queue`, then maps the staging buffer for
    /// reading; `callback` receives the mapped result (or the map error) once
    /// the map operation completes.
    pub fn read_buffer(
        device: &super::Device,
        queue: &super::Queue,
        buffer: &super::BufferSlice,
        callback: impl FnOnce(Result<Self, super::BufferAsyncError>) + Send + 'static,
    ) {
        // A slice without an explicit size covers the remainder of its buffer.
        let size = match buffer.size {
            Some(size) => size.into(),
            None => buffer.buffer.map_context.lock().total_size - buffer.offset,
        };
        let download = Arc::new(device.create_buffer(&super::BufferDescriptor {
            size,
            usage: super::BufferUsages::COPY_DST | super::BufferUsages::MAP_READ,
            mapped_at_creation: false,
            label: None,
        }));
        let mut encoder =
            device.create_command_encoder(&super::CommandEncoderDescriptor { label: None });
        encoder.copy_buffer_to_buffer(buffer.buffer, buffer.offset, &download, 0, size);
        let command_buffer: super::CommandBuffer = encoder.finish();
        queue.submit(Some(command_buffer));
        // The `Arc` clone keeps the staging buffer alive inside the callback.
        download
            .clone()
            .slice(..)
            .map_async(super::MapMode::Read, move |result| {
                if let Err(e) = result {
                    callback(Err(e));
                    return;
                }
                // Obtain the mapped range through the low-level context so the
                // view can be stored alongside its owning buffer in `Self`.
                let mapped_range = super::DynContext::buffer_get_mapped_range(
                    &*download.context,
                    &download.id,
                    download.data.as_ref(),
                    0..size,
                );
                callback(Ok(Self(download, mapped_range)));
            });
    }
}
impl std::ops::Deref for DownloadBuffer {
    type Target = [u8];
    /// Exposes the downloaded bytes as a plain slice view of the mapped range.
    fn deref(&self) -> &[u8] {
        self.1.slice()
    }
}