Vendor things
This commit is contained in:
parent
5deceec006
commit
977e3c17e5
19434 changed files with 10682014 additions and 0 deletions
1
third-party/vendor/wgpu-core/.cargo-checksum.json
vendored
Normal file
1
third-party/vendor/wgpu-core/.cargo-checksum.json
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"58df7854f816d9101372911579e00d6c23fed98a247026a377a954d9de3a6d99","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/binding_model.rs":"d92a2c21642d0bece4ce1a9877d08567a86af4991cfe0bf6ecaaaf8f8b9c8d74","src/command/bind.rs":"aa778a7a125496f31220e8aa06a7eee5c5bc524a29e77cc5a314a178a0813a80","src/command/bundle.rs":"00ac15c8c70cf58437a0f66472e6c8b73200c6ec15e2062adabd151bebc9b2fc","src/command/clear.rs":"b61144473752e363dfe9c15951702865921b568c8ee5136af7aa4237f015c383","src/command/compute.rs":"96ca2d55d9ba5f1067c701df25eb5e655557b17a45f306e3d8d31bd196839868","src/command/draw.rs":"14a0319da47e4995c2ad97f1102998b0d4beb2f6d07df8a0cb6f08023185ce7a","src/command/memory_init.rs":"b50d3d20dbf659052f19da2e79469ba6435e06370f19d6ef45e1b1128d9900b7","src/command/mod.rs":"c7b7a4dd50636694a835e48f6a65dba8cf873168a02758fae73d6c04d48dfc45","src/command/query.rs":"e12108706de23a2925d180f96dcb870d167c1d4033903d306435395284b7a0d5","src/command/render.rs":"b72e27b8a615551bb3320ceaac3e285e082522cd0524e9960e8628aa3d4b10d6","src/command/transfer.rs":"c777c6e51afb459d2b5416e31071f24e4215c66f456fee3bd8f7395f9d1c5db1","src/conv.rs":"a21506ce183e7989df0e8db29d8cd6f9884b3531d004a0e5193e3aa0b73b10c8","src/device/global.rs":"236de3a224f1e77dc5a4896c523a10be2afca1847debff1127476e6163634cfd","src/device/life.rs":"c935c15c4c7f929e378a5ea930d0d36b47616a49991c236aaa10d25ce5852d15","src/device/mod.rs":"a15d223b2f1c09ab662c777016bc90dca1411ce940d2750b0272068af1022431","src/device/queue.rs":"b1ef887b92574d2541ef6f572cd14067e2af3a514fa26d547f9c18e2cbd30b92","src/device/resource.rs":"55bf87b8bcf0199221b558c11d58028ad2b2563ee525ead648c792aacf811b84","src/device/trace.rs":"f69aa6af36a6defcf391ddb6cf12a56e37e00b4595e95c284cd7fd400394349c","src/error.rs":"ca37282283985e2b7d184b2ab7ca6f53f726432d920f8d8477bfff6fab9b34e2","src/global.rs":"cf551de97c3eb5acd0c2710da09ebd9
2cc863ad0bb0f53c0fd4911bf8cd3ad97","src/hal_api.rs":"92a2f0cb80f192693530ed61048919bbad446742c2370bf0944c44b1c5df8362","src/hub.rs":"49f479c3ebed842a4bc8ab2fee00bc02dceb57790fbac8ba33e1bfed795fa675","src/id.rs":"f6245d024586c7fe63ded13b3cb926b940c191bbee56aedc655e8cef74bdd66b","src/identity.rs":"c2e008e652723f7896465bfdafd5a10141cf5866e8c481a8efcf0bdaa9619a6a","src/init_tracker/buffer.rs":"a0ebf54a1e6d269c7b4aa0ac7bb8b04fd2cea3221a1d058ff33cb683b2aea3e9","src/init_tracker/mod.rs":"0867f79f83555390d0982d1dc6dcf0d4340e10cb89aa633d3c3ecc45deb3c78c","src/init_tracker/texture.rs":"37b6584aaca11c407d91f77002dcbb48d8a4876e27edd1b71b7929ef966f901d","src/instance.rs":"363484220e0936eabd79098631a1b646173632ed01126aaf56f65b300bf6df92","src/lib.rs":"27ff8dd787d41cf412e90d0c4674aa70db59e608f9eb3be485c0bd18e9f13369","src/pipeline.rs":"669219add15448fdf5fe8bc5e03fd6fd1ada2b45b07047fd8c0a9bbbcdecad8b","src/present.rs":"9a1d58bee172c1173d9471bb0ea222dde2f36bc8a082bc795ac0005659e9e7c0","src/registry.rs":"4098413de7f48e9ff15d0246793be47a0d54c95b4c8594baf9fafd222a90ba84","src/resource.rs":"a03329428f820b43810d82f990c72990a45a6bdf482d3ce4b096b0f99c6c6844","src/storage.rs":"bc70689ba299e9b4d9f4992c4d3f4dd36b1d8e71327595094981fdfd624f811a","src/track/buffer.rs":"dd6f632c6f31b15807148d705c516a8a1a8d72d02b137dd3b9d7c939447917cb","src/track/metadata.rs":"a80bd086ce825f7484ce6318a586c482d06fea0efc9c76bfa0124e480cc8b75e","src/track/mod.rs":"04cd09cf5f26262175e48cc3855b79fbd8988916c4367a55d39a4c95784d249b","src/track/range.rs":"5bbfed6e103b3234d9de8e42057022da6d628c2cc1db6bb51b88f87f2d8adf8b","src/track/stateless.rs":"1d786b5e9558672243ba7d913736561065ef2bd5c6105c935e982486d10841f0","src/track/texture.rs":"7d60dc81ba7f7e2c2819525b90e6e6c7760cb0920e36aeefe98e76cedd49d26e","src/validation.rs":"24057a2bc40c5e77a440fb432f0007e0b67cf41b8056394cff97208447d06513"},"package":"0f8a44dd301a30ceeed3c27d8c0090433d3da04d7b2a4042738095a424d12ae7"}
|
||||
140
third-party/vendor/wgpu-core/Cargo.toml
vendored
Normal file
140
third-party/vendor/wgpu-core/Cargo.toml
vendored
Normal file
|
|
@ -0,0 +1,140 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "wgpu-core"
|
||||
version = "0.17.1"
|
||||
authors = ["wgpu developers"]
|
||||
description = "WebGPU core logic on wgpu-hal"
|
||||
homepage = "https://wgpu.rs/"
|
||||
keywords = ["graphics"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/gfx-rs/wgpu"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = [
|
||||
"--cfg",
|
||||
"docsrs",
|
||||
]
|
||||
targets = [
|
||||
"x86_64-unknown-linux-gnu",
|
||||
"x86_64-apple-darwin",
|
||||
"x86_64-pc-windows-msvc",
|
||||
"wasm32-unknown-unknown",
|
||||
]
|
||||
|
||||
[lib]
|
||||
|
||||
[dependencies.arrayvec]
|
||||
version = "0.7"
|
||||
|
||||
[dependencies.bit-vec]
|
||||
version = "0.6"
|
||||
|
||||
[dependencies.bitflags]
|
||||
version = "2"
|
||||
|
||||
[dependencies.codespan-reporting]
|
||||
version = "0.11"
|
||||
|
||||
[dependencies.hal]
|
||||
version = "0.17"
|
||||
default_features = false
|
||||
package = "wgpu-hal"
|
||||
|
||||
[dependencies.log]
|
||||
version = "0.4"
|
||||
|
||||
[dependencies.naga]
|
||||
version = "0.13.0"
|
||||
features = [
|
||||
"clone",
|
||||
"span",
|
||||
"validate",
|
||||
]
|
||||
|
||||
[dependencies.parking_lot]
|
||||
version = ">=0.11,<0.13"
|
||||
|
||||
[dependencies.profiling]
|
||||
version = "1"
|
||||
default-features = false
|
||||
|
||||
[dependencies.raw-window-handle]
|
||||
version = "0.5"
|
||||
optional = true
|
||||
|
||||
[dependencies.ron]
|
||||
version = "0.8"
|
||||
optional = true
|
||||
|
||||
[dependencies.rustc-hash]
|
||||
version = "1.1"
|
||||
|
||||
[dependencies.serde]
|
||||
version = "1"
|
||||
features = ["serde_derive"]
|
||||
optional = true
|
||||
|
||||
[dependencies.smallvec]
|
||||
version = "1"
|
||||
|
||||
[dependencies.thiserror]
|
||||
version = "1"
|
||||
|
||||
[dependencies.wgt]
|
||||
version = "0.17"
|
||||
package = "wgpu-types"
|
||||
|
||||
[features]
|
||||
angle = ["hal/gles"]
|
||||
default = ["link"]
|
||||
dx11 = ["hal/dx11"]
|
||||
dx12 = ["hal/dx12"]
|
||||
fragile-send-sync-non-atomic-wasm = [
|
||||
"hal/fragile-send-sync-non-atomic-wasm",
|
||||
"wgt/fragile-send-sync-non-atomic-wasm",
|
||||
]
|
||||
gles = ["hal/gles"]
|
||||
id32 = []
|
||||
link = ["hal/link"]
|
||||
metal = ["hal/metal"]
|
||||
renderdoc = ["hal/renderdoc"]
|
||||
replay = [
|
||||
"serde",
|
||||
"wgt/replay",
|
||||
"arrayvec/serde",
|
||||
"naga/deserialize",
|
||||
]
|
||||
serial-pass = [
|
||||
"serde",
|
||||
"wgt/serde",
|
||||
"arrayvec/serde",
|
||||
]
|
||||
strict_asserts = ["wgt/strict_asserts"]
|
||||
trace = [
|
||||
"ron",
|
||||
"serde",
|
||||
"wgt/trace",
|
||||
"arrayvec/serde",
|
||||
"naga/serialize",
|
||||
]
|
||||
vulkan = ["hal/vulkan"]
|
||||
wgsl = ["naga/wgsl-in"]
|
||||
|
||||
[target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dependencies.web-sys]
|
||||
version = "0.3.64"
|
||||
features = [
|
||||
"HtmlCanvasElement",
|
||||
"OffscreenCanvas",
|
||||
]
|
||||
176
third-party/vendor/wgpu-core/LICENSE.APACHE
vendored
Normal file
176
third-party/vendor/wgpu-core/LICENSE.APACHE
vendored
Normal file
|
|
@ -0,0 +1,176 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
21
third-party/vendor/wgpu-core/LICENSE.MIT
vendored
Normal file
21
third-party/vendor/wgpu-core/LICENSE.MIT
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2021 The gfx-rs developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
855
third-party/vendor/wgpu-core/src/binding_model.rs
vendored
Normal file
855
third-party/vendor/wgpu-core/src/binding_model.rs
vendored
Normal file
|
|
@ -0,0 +1,855 @@
|
|||
use crate::{
|
||||
device::{DeviceError, MissingDownlevelFlags, MissingFeatures, SHADER_STAGE_COUNT},
|
||||
error::{ErrorFormatter, PrettyError},
|
||||
hal_api::HalApi,
|
||||
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureId, TextureViewId, Valid},
|
||||
init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction},
|
||||
resource::Resource,
|
||||
track::{BindGroupStates, UsageConflict},
|
||||
validation::{MissingBufferUsageError, MissingTextureUsageError},
|
||||
FastHashMap, Label, LifeGuard, MultiRefCount, Stored,
|
||||
};
|
||||
|
||||
use arrayvec::ArrayVec;
|
||||
|
||||
#[cfg(feature = "replay")]
|
||||
use serde::Deserialize;
|
||||
#[cfg(feature = "trace")]
|
||||
use serde::Serialize;
|
||||
|
||||
use std::{borrow::Cow, ops::Range};
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum BindGroupLayoutEntryError {
|
||||
#[error("Cube dimension is not expected for texture storage")]
|
||||
StorageTextureCube,
|
||||
#[error("Read-write and read-only storage textures are not allowed by webgpu, they require the native only feature TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES")]
|
||||
StorageTextureReadWrite,
|
||||
#[error("Arrays of bindings unsupported for this type of binding")]
|
||||
ArrayUnsupported,
|
||||
#[error("Multisampled binding with sample type `TextureSampleType::Float` must have filterable set to false.")]
|
||||
SampleTypeFloatFilterableBindingMultisampled,
|
||||
#[error(transparent)]
|
||||
MissingFeatures(#[from] MissingFeatures),
|
||||
#[error(transparent)]
|
||||
MissingDownlevelFlags(#[from] MissingDownlevelFlags),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum CreateBindGroupLayoutError {
|
||||
#[error(transparent)]
|
||||
Device(#[from] DeviceError),
|
||||
#[error("Conflicting binding at index {0}")]
|
||||
ConflictBinding(u32),
|
||||
#[error("Binding {binding} entry is invalid")]
|
||||
Entry {
|
||||
binding: u32,
|
||||
#[source]
|
||||
error: BindGroupLayoutEntryError,
|
||||
},
|
||||
#[error(transparent)]
|
||||
TooManyBindings(BindingTypeMaxCountError),
|
||||
#[error("Binding index {binding} is greater than the maximum index {maximum}")]
|
||||
InvalidBindingIndex { binding: u32, maximum: u32 },
|
||||
#[error("Invalid visibility {0:?}")]
|
||||
InvalidVisibility(wgt::ShaderStages),
|
||||
}
|
||||
|
||||
//TODO: refactor this to move out `enum BindingError`.
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum CreateBindGroupError {
|
||||
#[error(transparent)]
|
||||
Device(#[from] DeviceError),
|
||||
#[error("Bind group layout is invalid")]
|
||||
InvalidLayout,
|
||||
#[error("Buffer {0:?} is invalid or destroyed")]
|
||||
InvalidBuffer(BufferId),
|
||||
#[error("Texture view {0:?} is invalid")]
|
||||
InvalidTextureView(TextureViewId),
|
||||
#[error("Texture {0:?} is invalid")]
|
||||
InvalidTexture(TextureId),
|
||||
#[error("Sampler {0:?} is invalid")]
|
||||
InvalidSampler(SamplerId),
|
||||
#[error(
|
||||
"Binding count declared with at most {expected} items, but {actual} items were provided"
|
||||
)]
|
||||
BindingArrayPartialLengthMismatch { actual: usize, expected: usize },
|
||||
#[error(
|
||||
"Binding count declared with exactly {expected} items, but {actual} items were provided"
|
||||
)]
|
||||
BindingArrayLengthMismatch { actual: usize, expected: usize },
|
||||
#[error("Array binding provided zero elements")]
|
||||
BindingArrayZeroLength,
|
||||
#[error("Bound buffer range {range:?} does not fit in buffer of size {size}")]
|
||||
BindingRangeTooLarge {
|
||||
buffer: BufferId,
|
||||
range: Range<wgt::BufferAddress>,
|
||||
size: u64,
|
||||
},
|
||||
#[error("Buffer binding size {actual} is less than minimum {min}")]
|
||||
BindingSizeTooSmall {
|
||||
buffer: BufferId,
|
||||
actual: u64,
|
||||
min: u64,
|
||||
},
|
||||
#[error("Buffer binding size is zero")]
|
||||
BindingZeroSize(BufferId),
|
||||
#[error("Number of bindings in bind group descriptor ({actual}) does not match the number of bindings defined in the bind group layout ({expected})")]
|
||||
BindingsNumMismatch { actual: usize, expected: usize },
|
||||
#[error("Binding {0} is used at least twice in the descriptor")]
|
||||
DuplicateBinding(u32),
|
||||
#[error("Unable to find a corresponding declaration for the given binding {0}")]
|
||||
MissingBindingDeclaration(u32),
|
||||
#[error(transparent)]
|
||||
MissingBufferUsage(#[from] MissingBufferUsageError),
|
||||
#[error(transparent)]
|
||||
MissingTextureUsage(#[from] MissingTextureUsageError),
|
||||
#[error("Binding declared as a single item, but bind group is using it as an array")]
|
||||
SingleBindingExpected,
|
||||
#[error("Buffer offset {0} does not respect device's requested `{1}` limit {2}")]
|
||||
UnalignedBufferOffset(wgt::BufferAddress, &'static str, u32),
|
||||
#[error(
|
||||
"Buffer binding {binding} range {given} exceeds `max_*_buffer_binding_size` limit {limit}"
|
||||
)]
|
||||
BufferRangeTooLarge {
|
||||
binding: u32,
|
||||
given: u32,
|
||||
limit: u32,
|
||||
},
|
||||
#[error("Binding {binding} has a different type ({actual:?}) than the one in the layout ({expected:?})")]
|
||||
WrongBindingType {
|
||||
// Index of the binding
|
||||
binding: u32,
|
||||
// The type given to the function
|
||||
actual: wgt::BindingType,
|
||||
// Human-readable description of expected types
|
||||
expected: &'static str,
|
||||
},
|
||||
#[error("Texture binding {binding} expects multisampled = {layout_multisampled}, but given a view with samples = {view_samples}")]
|
||||
InvalidTextureMultisample {
|
||||
binding: u32,
|
||||
layout_multisampled: bool,
|
||||
view_samples: u32,
|
||||
},
|
||||
#[error("Texture binding {binding} expects sample type = {layout_sample_type:?}, but given a view with format = {view_format:?}")]
|
||||
InvalidTextureSampleType {
|
||||
binding: u32,
|
||||
layout_sample_type: wgt::TextureSampleType,
|
||||
view_format: wgt::TextureFormat,
|
||||
},
|
||||
#[error("Texture binding {binding} expects dimension = {layout_dimension:?}, but given a view with dimension = {view_dimension:?}")]
|
||||
InvalidTextureDimension {
|
||||
binding: u32,
|
||||
layout_dimension: wgt::TextureViewDimension,
|
||||
view_dimension: wgt::TextureViewDimension,
|
||||
},
|
||||
#[error("Storage texture binding {binding} expects format = {layout_format:?}, but given a view with format = {view_format:?}")]
|
||||
InvalidStorageTextureFormat {
|
||||
binding: u32,
|
||||
layout_format: wgt::TextureFormat,
|
||||
view_format: wgt::TextureFormat,
|
||||
},
|
||||
#[error("Storage texture bindings must have a single mip level, but given a view with mip_level_count = {mip_level_count:?} at binding {binding}")]
|
||||
InvalidStorageTextureMipLevelCount { binding: u32, mip_level_count: u32 },
|
||||
#[error("Sampler binding {binding} expects comparison = {layout_cmp}, but given a sampler with comparison = {sampler_cmp}")]
|
||||
WrongSamplerComparison {
|
||||
binding: u32,
|
||||
layout_cmp: bool,
|
||||
sampler_cmp: bool,
|
||||
},
|
||||
#[error("Sampler binding {binding} expects filtering = {layout_flt}, but given a sampler with filtering = {sampler_flt}")]
|
||||
WrongSamplerFiltering {
|
||||
binding: u32,
|
||||
layout_flt: bool,
|
||||
sampler_flt: bool,
|
||||
},
|
||||
#[error("Bound texture views can not have both depth and stencil aspects enabled")]
|
||||
DepthStencilAspect,
|
||||
#[error("The adapter does not support read access for storages texture of format {0:?}")]
|
||||
StorageReadNotSupported(wgt::TextureFormat),
|
||||
#[error(transparent)]
|
||||
ResourceUsageConflict(#[from] UsageConflict),
|
||||
}
|
||||
|
||||
impl PrettyError for CreateBindGroupError {
|
||||
fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
|
||||
fmt.error(self);
|
||||
match *self {
|
||||
Self::BindingZeroSize(id) => {
|
||||
fmt.buffer_label(&id);
|
||||
}
|
||||
Self::BindingRangeTooLarge { buffer, .. } => {
|
||||
fmt.buffer_label(&buffer);
|
||||
}
|
||||
Self::BindingSizeTooSmall { buffer, .. } => {
|
||||
fmt.buffer_label(&buffer);
|
||||
}
|
||||
Self::InvalidBuffer(id) => {
|
||||
fmt.buffer_label(&id);
|
||||
}
|
||||
Self::InvalidTextureView(id) => {
|
||||
fmt.texture_view_label(&id);
|
||||
}
|
||||
Self::InvalidSampler(id) => {
|
||||
fmt.sampler_label(&id);
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
pub enum BindingZone {
|
||||
#[error("Stage {0:?}")]
|
||||
Stage(wgt::ShaderStages),
|
||||
#[error("Whole pipeline")]
|
||||
Pipeline,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[error("Too many bindings of type {kind:?} in {zone}, limit is {limit}, count was {count}")]
|
||||
pub struct BindingTypeMaxCountError {
|
||||
pub kind: BindingTypeMaxCountErrorKind,
|
||||
pub zone: BindingZone,
|
||||
pub limit: u32,
|
||||
pub count: u32,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum BindingTypeMaxCountErrorKind {
|
||||
DynamicUniformBuffers,
|
||||
DynamicStorageBuffers,
|
||||
SampledTextures,
|
||||
Samplers,
|
||||
StorageBuffers,
|
||||
StorageTextures,
|
||||
UniformBuffers,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct PerStageBindingTypeCounter {
|
||||
vertex: u32,
|
||||
fragment: u32,
|
||||
compute: u32,
|
||||
}
|
||||
|
||||
impl PerStageBindingTypeCounter {
|
||||
pub(crate) fn add(&mut self, stage: wgt::ShaderStages, count: u32) {
|
||||
if stage.contains(wgt::ShaderStages::VERTEX) {
|
||||
self.vertex += count;
|
||||
}
|
||||
if stage.contains(wgt::ShaderStages::FRAGMENT) {
|
||||
self.fragment += count;
|
||||
}
|
||||
if stage.contains(wgt::ShaderStages::COMPUTE) {
|
||||
self.compute += count;
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn max(&self) -> (BindingZone, u32) {
|
||||
let max_value = self.vertex.max(self.fragment.max(self.compute));
|
||||
let mut stage = wgt::ShaderStages::NONE;
|
||||
if max_value == self.vertex {
|
||||
stage |= wgt::ShaderStages::VERTEX
|
||||
}
|
||||
if max_value == self.fragment {
|
||||
stage |= wgt::ShaderStages::FRAGMENT
|
||||
}
|
||||
if max_value == self.compute {
|
||||
stage |= wgt::ShaderStages::COMPUTE
|
||||
}
|
||||
(BindingZone::Stage(stage), max_value)
|
||||
}
|
||||
|
||||
pub(crate) fn merge(&mut self, other: &Self) {
|
||||
self.vertex = self.vertex.max(other.vertex);
|
||||
self.fragment = self.fragment.max(other.fragment);
|
||||
self.compute = self.compute.max(other.compute);
|
||||
}
|
||||
|
||||
pub(crate) fn validate(
|
||||
&self,
|
||||
limit: u32,
|
||||
kind: BindingTypeMaxCountErrorKind,
|
||||
) -> Result<(), BindingTypeMaxCountError> {
|
||||
let (zone, count) = self.max();
|
||||
if limit < count {
|
||||
Err(BindingTypeMaxCountError {
|
||||
kind,
|
||||
zone,
|
||||
limit,
|
||||
count,
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct BindingTypeMaxCountValidator {
|
||||
dynamic_uniform_buffers: u32,
|
||||
dynamic_storage_buffers: u32,
|
||||
sampled_textures: PerStageBindingTypeCounter,
|
||||
samplers: PerStageBindingTypeCounter,
|
||||
storage_buffers: PerStageBindingTypeCounter,
|
||||
storage_textures: PerStageBindingTypeCounter,
|
||||
uniform_buffers: PerStageBindingTypeCounter,
|
||||
}
|
||||
|
||||
impl BindingTypeMaxCountValidator {
|
||||
pub(crate) fn add_binding(&mut self, binding: &wgt::BindGroupLayoutEntry) {
|
||||
let count = binding.count.map_or(1, |count| count.get());
|
||||
match binding.ty {
|
||||
wgt::BindingType::Buffer {
|
||||
ty: wgt::BufferBindingType::Uniform,
|
||||
has_dynamic_offset,
|
||||
..
|
||||
} => {
|
||||
self.uniform_buffers.add(binding.visibility, count);
|
||||
if has_dynamic_offset {
|
||||
self.dynamic_uniform_buffers += count;
|
||||
}
|
||||
}
|
||||
wgt::BindingType::Buffer {
|
||||
ty: wgt::BufferBindingType::Storage { .. },
|
||||
has_dynamic_offset,
|
||||
..
|
||||
} => {
|
||||
self.storage_buffers.add(binding.visibility, count);
|
||||
if has_dynamic_offset {
|
||||
self.dynamic_storage_buffers += count;
|
||||
}
|
||||
}
|
||||
wgt::BindingType::Sampler { .. } => {
|
||||
self.samplers.add(binding.visibility, count);
|
||||
}
|
||||
wgt::BindingType::Texture { .. } => {
|
||||
self.sampled_textures.add(binding.visibility, count);
|
||||
}
|
||||
wgt::BindingType::StorageTexture { .. } => {
|
||||
self.storage_textures.add(binding.visibility, count);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn merge(&mut self, other: &Self) {
|
||||
self.dynamic_uniform_buffers += other.dynamic_uniform_buffers;
|
||||
self.dynamic_storage_buffers += other.dynamic_storage_buffers;
|
||||
self.sampled_textures.merge(&other.sampled_textures);
|
||||
self.samplers.merge(&other.samplers);
|
||||
self.storage_buffers.merge(&other.storage_buffers);
|
||||
self.storage_textures.merge(&other.storage_textures);
|
||||
self.uniform_buffers.merge(&other.uniform_buffers);
|
||||
}
|
||||
|
||||
pub(crate) fn validate(&self, limits: &wgt::Limits) -> Result<(), BindingTypeMaxCountError> {
|
||||
if limits.max_dynamic_uniform_buffers_per_pipeline_layout < self.dynamic_uniform_buffers {
|
||||
return Err(BindingTypeMaxCountError {
|
||||
kind: BindingTypeMaxCountErrorKind::DynamicUniformBuffers,
|
||||
zone: BindingZone::Pipeline,
|
||||
limit: limits.max_dynamic_uniform_buffers_per_pipeline_layout,
|
||||
count: self.dynamic_uniform_buffers,
|
||||
});
|
||||
}
|
||||
if limits.max_dynamic_storage_buffers_per_pipeline_layout < self.dynamic_storage_buffers {
|
||||
return Err(BindingTypeMaxCountError {
|
||||
kind: BindingTypeMaxCountErrorKind::DynamicStorageBuffers,
|
||||
zone: BindingZone::Pipeline,
|
||||
limit: limits.max_dynamic_storage_buffers_per_pipeline_layout,
|
||||
count: self.dynamic_storage_buffers,
|
||||
});
|
||||
}
|
||||
self.sampled_textures.validate(
|
||||
limits.max_sampled_textures_per_shader_stage,
|
||||
BindingTypeMaxCountErrorKind::SampledTextures,
|
||||
)?;
|
||||
self.storage_buffers.validate(
|
||||
limits.max_storage_buffers_per_shader_stage,
|
||||
BindingTypeMaxCountErrorKind::StorageBuffers,
|
||||
)?;
|
||||
self.samplers.validate(
|
||||
limits.max_samplers_per_shader_stage,
|
||||
BindingTypeMaxCountErrorKind::Samplers,
|
||||
)?;
|
||||
self.storage_buffers.validate(
|
||||
limits.max_storage_buffers_per_shader_stage,
|
||||
BindingTypeMaxCountErrorKind::StorageBuffers,
|
||||
)?;
|
||||
self.storage_textures.validate(
|
||||
limits.max_storage_textures_per_shader_stage,
|
||||
BindingTypeMaxCountErrorKind::StorageTextures,
|
||||
)?;
|
||||
self.uniform_buffers.validate(
|
||||
limits.max_uniform_buffers_per_shader_stage,
|
||||
BindingTypeMaxCountErrorKind::UniformBuffers,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Bindable resource and the slot to bind it to.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BindGroupEntry<'a> {
    /// Slot for which binding provides resource. Corresponds to an entry of the same
    /// binding index in the [`BindGroupLayoutDescriptor`].
    pub binding: u32,
    /// Resource to attach to the binding.
    pub resource: BindingResource<'a>,
}
|
||||
|
||||
/// Describes a group of bindings and the resources to be bound.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BindGroupDescriptor<'a> {
    /// Debug label of the bind group.
    ///
    /// This will show up in graphics debuggers for easy identification.
    pub label: Label<'a>,
    /// The [`BindGroupLayout`] that corresponds to this bind group.
    pub layout: BindGroupLayoutId,
    /// The resources to bind to this bind group, one per layout entry.
    pub entries: Cow<'a, [BindGroupEntry<'a>]>,
}
|
||||
|
||||
/// Describes a [`BindGroupLayout`].
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct BindGroupLayoutDescriptor<'a> {
    /// Debug label of the bind group layout.
    ///
    /// This will show up in graphics debuggers for easy identification.
    pub label: Label<'a>,
    /// Array of entries in this BindGroupLayout.
    pub entries: Cow<'a, [wgt::BindGroupLayoutEntry]>,
}
|
||||
|
||||
/// Map from binding index to the layout entry that describes it.
pub(crate) type BindEntryMap = FastHashMap<u32, wgt::BindGroupLayoutEntry>;
|
||||
|
||||
/// Bind group layout.
///
/// The lifetime of BGLs is a bit special. They are only referenced on CPU
/// without considering GPU operations. And on CPU they get manual
/// inc-refs and dec-refs. In particular, the following objects depend on them:
/// - produced bind groups
/// - produced pipeline layouts
/// - pipelines with implicit layouts
#[derive(Debug)]
pub struct BindGroupLayout<A: hal::Api> {
    pub(crate) raw: A::BindGroupLayout,
    pub(crate) device_id: Stored<DeviceId>,
    // Manual CPU-side reference counting (see the type docs above);
    // BGLs do not use a `LifeGuard`.
    pub(crate) multi_ref_count: MultiRefCount,
    pub(crate) entries: BindEntryMap,
    #[allow(unused)]
    pub(crate) dynamic_count: usize,
    // Running binding-count tallies, merged into pipeline layouts built
    // from this BGL for limit validation.
    pub(crate) count_validator: BindingTypeMaxCountValidator,
    // The label is only stored in debug builds (see `Resource::label`).
    #[cfg(debug_assertions)]
    pub(crate) label: String,
}
|
||||
|
||||
impl<A: hal::Api> Resource for BindGroupLayout<A> {
    const TYPE: &'static str = "BindGroupLayout";

    /// BGLs are tracked with `MultiRefCount` rather than a `LifeGuard`,
    /// so this accessor must never be called.
    fn life_guard(&self) -> &LifeGuard {
        unreachable!()
    }

    fn label(&self) -> &str {
        // The label field only exists in debug builds; release builds
        // report an empty label.
        #[cfg(debug_assertions)]
        return &self.label;
        #[cfg(not(debug_assertions))]
        return "";
    }
}
|
||||
|
||||
/// Errors that can occur when creating a pipeline layout.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreatePipelineLayoutError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Bind group layout {0:?} is invalid")]
    InvalidBindGroupLayout(BindGroupLayoutId),
    #[error(
        "Push constant at index {index} has range bound {bound} not aligned to {}",
        wgt::PUSH_CONSTANT_ALIGNMENT
    )]
    MisalignedPushConstantRange { index: usize, bound: u32 },
    #[error(transparent)]
    MissingFeatures(#[from] MissingFeatures),
    #[error("Push constant range (index {index}) provides for stage(s) {provided:?} but there exists another range that provides stage(s) {intersected:?}. Each stage may only be provided by one range")]
    MoreThanOnePushConstantRangePerStage {
        index: usize,
        provided: wgt::ShaderStages,
        intersected: wgt::ShaderStages,
    },
    #[error("Push constant at index {index} has range {}..{} which exceeds device push constant size limit 0..{max}", range.start, range.end)]
    PushConstantRangeTooLarge {
        index: usize,
        range: Range<u32>,
        max: u32,
    },
    #[error(transparent)]
    TooManyBindings(BindingTypeMaxCountError),
    #[error("Bind group layout count {actual} exceeds device bind group limit {max}")]
    TooManyGroups { actual: usize, max: usize },
}
|
||||
|
||||
impl PrettyError for CreatePipelineLayoutError {
|
||||
fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
|
||||
fmt.error(self);
|
||||
if let Self::InvalidBindGroupLayout(id) = *self {
|
||||
fmt.bind_group_layout_label(&id);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors produced when validating a push constant upload against the
/// pipeline layout's declared push constant ranges.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum PushConstantUploadError {
    #[error("Provided push constant with indices {offset}..{end_offset} overruns matching push constant range at index {idx}, with stage(s) {:?} and indices {:?}", range.stages, range.range)]
    TooLarge {
        offset: u32,
        end_offset: u32,
        idx: usize,
        range: wgt::PushConstantRange,
    },
    #[error("Provided push constant is for stage(s) {actual:?}, stage with a partial match found at index {idx} with stage(s) {matched:?}, however push constants must be complete matches")]
    PartialRangeMatch {
        actual: wgt::ShaderStages,
        idx: usize,
        matched: wgt::ShaderStages,
    },
    #[error("Provided push constant is for stage(s) {actual:?}, but intersects a push constant range (at index {idx}) with stage(s) {missing:?}. Push constants must provide the stages for all ranges they intersect")]
    MissingStages {
        actual: wgt::ShaderStages,
        idx: usize,
        missing: wgt::ShaderStages,
    },
    #[error("Provided push constant is for stage(s) {actual:?}, however the pipeline layout has no push constant range for the stage(s) {unmatched:?}")]
    UnmatchedStages {
        actual: wgt::ShaderStages,
        unmatched: wgt::ShaderStages,
    },
    #[error("Provided push constant offset {0} does not respect `PUSH_CONSTANT_ALIGNMENT`")]
    Unaligned(u32),
}
|
||||
|
||||
/// Describes a pipeline layout.
///
/// A `PipelineLayoutDescriptor` can be used to create a pipeline layout.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PipelineLayoutDescriptor<'a> {
    /// Debug label of the pipeline layout.
    ///
    /// This will show up in graphics debuggers for easy identification.
    pub label: Label<'a>,
    /// Bind groups that this pipeline uses. The first entry will provide all the bindings for
    /// "set = 0", second entry will provide all the bindings for "set = 1" etc.
    pub bind_group_layouts: Cow<'a, [BindGroupLayoutId]>,
    /// Set of push constant ranges this pipeline uses. Each shader stage that
    /// uses push constants must define the range in push constant memory that
    /// corresponds to its single `layout(push_constant)` uniform block.
    ///
    /// If this array is non-empty, the
    /// [`Features::PUSH_CONSTANTS`](wgt::Features::PUSH_CONSTANTS) feature must
    /// be enabled.
    pub push_constant_ranges: Cow<'a, [wgt::PushConstantRange]>,
}
|
||||
|
||||
/// A created pipeline layout: the raw HAL object plus the bind group
/// layouts and push constant ranges it was built from.
#[derive(Debug)]
pub struct PipelineLayout<A: hal::Api> {
    pub(crate) raw: A::PipelineLayout,
    pub(crate) device_id: Stored<DeviceId>,
    pub(crate) life_guard: LifeGuard,
    // One entry per bind group slot, in slot order.
    pub(crate) bind_group_layout_ids: ArrayVec<Valid<BindGroupLayoutId>, { hal::MAX_BIND_GROUPS }>,
    pub(crate) push_constant_ranges: ArrayVec<wgt::PushConstantRange, { SHADER_STAGE_COUNT }>,
}
|
||||
|
||||
impl<A: hal::Api> PipelineLayout<A> {
|
||||
/// Validate push constants match up with expected ranges.
|
||||
pub(crate) fn validate_push_constant_ranges(
|
||||
&self,
|
||||
stages: wgt::ShaderStages,
|
||||
offset: u32,
|
||||
end_offset: u32,
|
||||
) -> Result<(), PushConstantUploadError> {
|
||||
// Don't need to validate size against the push constant size limit here,
|
||||
// as push constant ranges are already validated to be within bounds,
|
||||
// and we validate that they are within the ranges.
|
||||
|
||||
if offset % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
|
||||
return Err(PushConstantUploadError::Unaligned(offset));
|
||||
}
|
||||
|
||||
// Push constant validation looks very complicated on the surface, but
|
||||
// the problem can be range-reduced pretty well.
|
||||
//
|
||||
// Push constants require (summarized from the vulkan spec):
|
||||
// 1. For each byte in the range and for each shader stage in stageFlags,
|
||||
// there must be a push constant range in the layout that includes that
|
||||
// byte and that stage.
|
||||
// 2. For each byte in the range and for each push constant range that overlaps that byte,
|
||||
// `stage` must include all stages in that push constant range’s `stage`.
|
||||
//
|
||||
// However there are some additional constraints that help us:
|
||||
// 3. All push constant ranges are the only range that can access that stage.
|
||||
// i.e. if one range has VERTEX, no other range has VERTEX
|
||||
//
|
||||
// Therefore we can simplify the checks in the following ways:
|
||||
// - Because 3 guarantees that the push constant range has a unique stage,
|
||||
// when we check for 1, we can simply check that our entire updated range
|
||||
// is within a push constant range. i.e. our range for a specific stage cannot
|
||||
// intersect more than one push constant range.
|
||||
let mut used_stages = wgt::ShaderStages::NONE;
|
||||
for (idx, range) in self.push_constant_ranges.iter().enumerate() {
|
||||
// contains not intersects due to 2
|
||||
if stages.contains(range.stages) {
|
||||
if !(range.range.start <= offset && end_offset <= range.range.end) {
|
||||
return Err(PushConstantUploadError::TooLarge {
|
||||
offset,
|
||||
end_offset,
|
||||
idx,
|
||||
range: range.clone(),
|
||||
});
|
||||
}
|
||||
used_stages |= range.stages;
|
||||
} else if stages.intersects(range.stages) {
|
||||
// Will be caught by used stages check below, but we can do this because of 1
|
||||
// and is more helpful to the user.
|
||||
return Err(PushConstantUploadError::PartialRangeMatch {
|
||||
actual: stages,
|
||||
idx,
|
||||
matched: range.stages,
|
||||
});
|
||||
}
|
||||
|
||||
// The push constant range intersects range we are uploading
|
||||
if offset < range.range.end && range.range.start < end_offset {
|
||||
// But requires stages we don't provide
|
||||
if !stages.contains(range.stages) {
|
||||
return Err(PushConstantUploadError::MissingStages {
|
||||
actual: stages,
|
||||
idx,
|
||||
missing: stages,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
if used_stages != stages {
|
||||
return Err(PushConstantUploadError::UnmatchedStages {
|
||||
actual: stages,
|
||||
unmatched: stages - used_stages,
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<A: hal::Api> Resource for PipelineLayout<A> {
    const TYPE: &'static str = "PipelineLayout";

    // Pipeline layouts use the standard `LifeGuard` tracking.
    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
|
||||
|
||||
/// A region of a buffer to bind.
// `#[repr(C)]`: field order and layout are part of the ABI — do not reorder.
#[repr(C)]
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BufferBinding {
    /// The buffer being bound.
    pub buffer_id: BufferId,
    /// Byte offset into the buffer at which the binding starts.
    pub offset: wgt::BufferAddress,
    /// Size of the bound region; `None` presumably binds through the end
    /// of the buffer — confirm against binding-creation code.
    pub size: Option<wgt::BufferSize>,
}
|
||||
|
||||
/// Resource to be bound by a [`BindGroupEntry`].
// Note: Duplicated in `wgpu-rs` as `BindingResource`
// They're different enough that it doesn't make sense to share a common type
#[derive(Debug, Clone)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum BindingResource<'a> {
    Buffer(BufferBinding),
    BufferArray(Cow<'a, [BufferBinding]>),
    Sampler(SamplerId),
    SamplerArray(Cow<'a, [SamplerId]>),
    TextureView(TextureViewId),
    TextureViewArray(Cow<'a, [TextureViewId]>),
}
|
||||
|
||||
/// Errors found while validating the dynamic offsets supplied when
/// binding a bind group (see `BindGroup::validate_dynamic_bindings`).
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum BindError {
    #[error(
        "Bind group {group} expects {expected} dynamic offset{s0}. However {actual} dynamic offset{s1} were provided.",
        s0 = if *.expected >= 2 { "s" } else { "" },
        s1 = if *.actual >= 2 { "s" } else { "" },
    )]
    MismatchedDynamicOffsetCount {
        group: u32,
        actual: usize,
        expected: usize,
    },
    #[error(
        "Dynamic binding index {idx} (targeting bind group {group}, binding {binding}) with value {offset}, does not respect device's requested `{limit_name}` limit: {alignment}"
    )]
    UnalignedDynamicBinding {
        idx: usize,
        group: u32,
        binding: u32,
        offset: u32,
        alignment: u32,
        limit_name: &'static str,
    },
    #[error(
        "Dynamic binding offset index {idx} with offset {offset} would overrun the buffer bound to bind group {group} -> binding {binding}. \
         Buffer size is {buffer_size} bytes, the binding binds bytes {binding_range:?}, meaning the maximum the binding can be offset is {maximum_dynamic_offset} bytes",
    )]
    DynamicBindingOutOfBounds {
        idx: usize,
        group: u32,
        binding: u32,
        offset: u32,
        buffer_size: wgt::BufferAddress,
        binding_range: Range<wgt::BufferAddress>,
        maximum_dynamic_offset: wgt::BufferAddress,
    },
}
|
||||
|
||||
/// Per-binding data kept for each dynamic-offset buffer binding in a
/// bind group, used to validate the offsets supplied at bind time.
#[derive(Debug)]
pub struct BindGroupDynamicBindingData {
    /// The index of the binding.
    ///
    /// Used for more descriptive errors.
    pub(crate) binding_idx: u32,
    /// The size of the buffer.
    ///
    /// Used for more descriptive errors.
    pub(crate) buffer_size: wgt::BufferAddress,
    /// The range that the binding covers.
    ///
    /// Used for more descriptive errors.
    pub(crate) binding_range: Range<wgt::BufferAddress>,
    /// The maximum value the dynamic offset can have before running off the end of the buffer.
    pub(crate) maximum_dynamic_offset: wgt::BufferAddress,
    /// The binding type. Determines the required dynamic-offset alignment
    /// (see `buffer_binding_type_alignment`).
    pub(crate) binding_type: wgt::BufferBindingType,
}
|
||||
|
||||
pub(crate) fn buffer_binding_type_alignment(
|
||||
limits: &wgt::Limits,
|
||||
binding_type: wgt::BufferBindingType,
|
||||
) -> (u32, &'static str) {
|
||||
match binding_type {
|
||||
wgt::BufferBindingType::Uniform => (
|
||||
limits.min_uniform_buffer_offset_alignment,
|
||||
"min_uniform_buffer_offset_alignment",
|
||||
),
|
||||
wgt::BufferBindingType::Storage { .. } => (
|
||||
limits.min_storage_buffer_offset_alignment,
|
||||
"min_storage_buffer_offset_alignment",
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// A bound collection of resources, created against a [`BindGroupLayout`].
pub struct BindGroup<A: HalApi> {
    pub(crate) raw: A::BindGroup,
    pub(crate) device_id: Stored<DeviceId>,
    pub(crate) layout_id: Valid<BindGroupLayoutId>,
    pub(crate) life_guard: LifeGuard,
    // State of the resources referenced by this group (see `BindGroupStates`).
    pub(crate) used: BindGroupStates<A>,
    pub(crate) used_buffer_ranges: Vec<BufferInitTrackerAction>,
    pub(crate) used_texture_ranges: Vec<TextureInitTrackerAction>,
    // One record per dynamic-offset binding; its length must equal the
    // number of dynamic offsets supplied at bind time.
    pub(crate) dynamic_binding_info: Vec<BindGroupDynamicBindingData>,
    /// Actual binding sizes for buffers that don't have `min_binding_size`
    /// specified in BGL. Listed in the order of iteration of `BGL.entries`.
    pub(crate) late_buffer_binding_sizes: Vec<wgt::BufferSize>,
}
|
||||
|
||||
impl<A: HalApi> BindGroup<A> {
    /// Validate the dynamic offsets supplied when binding this group:
    /// the offset count must match the group's dynamic bindings, each
    /// offset must respect the device's alignment limit for its binding
    /// type, and no offset may push the binding past the end of its buffer.
    pub(crate) fn validate_dynamic_bindings(
        &self,
        bind_group_index: u32,
        offsets: &[wgt::DynamicOffset],
        limits: &wgt::Limits,
    ) -> Result<(), BindError> {
        if self.dynamic_binding_info.len() != offsets.len() {
            return Err(BindError::MismatchedDynamicOffsetCount {
                group: bind_group_index,
                expected: self.dynamic_binding_info.len(),
                actual: offsets.len(),
            });
        }

        // Offsets are supplied in the same order as `dynamic_binding_info`.
        for (idx, (info, &offset)) in self
            .dynamic_binding_info
            .iter()
            .zip(offsets.iter())
            .enumerate()
        {
            // Alignment requirement differs between uniform and storage buffers.
            let (alignment, limit_name) = buffer_binding_type_alignment(limits, info.binding_type);
            if offset as wgt::BufferAddress % alignment as u64 != 0 {
                return Err(BindError::UnalignedDynamicBinding {
                    group: bind_group_index,
                    binding: info.binding_idx,
                    idx,
                    offset,
                    alignment,
                    limit_name,
                });
            }

            if offset as wgt::BufferAddress > info.maximum_dynamic_offset {
                return Err(BindError::DynamicBindingOutOfBounds {
                    group: bind_group_index,
                    binding: info.binding_idx,
                    idx,
                    offset,
                    buffer_size: info.buffer_size,
                    binding_range: info.binding_range.clone(),
                    maximum_dynamic_offset: info.maximum_dynamic_offset,
                });
            }
        }

        Ok(())
    }
}
|
||||
|
||||
impl<A: HalApi> Resource for BindGroup<A> {
    const TYPE: &'static str = "BindGroup";

    // Bind groups use the standard `LifeGuard` tracking.
    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
|
||||
|
||||
/// Errors produced when looking up a bind group layout on a pipeline.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum GetBindGroupLayoutError {
    #[error("Pipeline is invalid")]
    InvalidPipeline,
    #[error("Invalid group index {0}")]
    InvalidGroupIndex(u32),
}
|
||||
|
||||
/// A bound buffer turned out smaller than the shader requires. Detected
/// late (at draw/dispatch time) because the layout did not specify
/// `min_binding_size`, so the size could not be checked at bind time.
#[derive(Clone, Debug, Error, Eq, PartialEq)]
#[error("Buffer is bound with size {bound_size} where the shader expects {shader_size} in group[{group_index}] compact index {compact_index}")]
pub struct LateMinBufferBindingSizeMismatch {
    pub group_index: u32,
    /// Index within the compacted list of late-sized bindings for the group.
    pub compact_index: usize,
    pub shader_size: wgt::BufferAddress,
    pub bound_size: wgt::BufferAddress,
}
|
||||
356
third-party/vendor/wgpu-core/src/command/bind.rs
vendored
Normal file
356
third-party/vendor/wgpu-core/src/command/bind.rs
vendored
Normal file
|
|
@ -0,0 +1,356 @@
|
|||
use crate::{
|
||||
binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PipelineLayout},
|
||||
device::SHADER_STAGE_COUNT,
|
||||
hal_api::HalApi,
|
||||
id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId, Valid},
|
||||
pipeline::LateSizedBufferGroup,
|
||||
storage::Storage,
|
||||
Stored,
|
||||
};
|
||||
|
||||
use arrayvec::ArrayVec;
|
||||
|
||||
/// One bit per bind group slot; a set bit flags an invalid (incompatible)
/// slot (see `compat::Manager::invalid_mask`).
type BindGroupMask = u8;
|
||||
|
||||
mod compat {
    use std::ops::Range;

    /// Pairing of the bind group layout currently bound in a slot
    /// (`assigned`) with the layout the active pipeline layout expects
    /// there (`expected`).
    #[derive(Debug)]
    struct Entry<T> {
        assigned: Option<T>,
        expected: Option<T>,
    }
    impl<T> Default for Entry<T> {
        fn default() -> Self {
            Self {
                assigned: None,
                expected: None,
            }
        }
    }
    impl<T: Copy + PartialEq> Entry<T> {
        /// A slot is active when it both has a bound group and is used by
        /// the current pipeline layout.
        fn is_active(&self) -> bool {
            self.assigned.is_some() && self.expected.is_some()
        }

        /// A slot is valid when it is unused by the pipeline layout, or
        /// when the bound layout matches the expected one.
        fn is_valid(&self) -> bool {
            self.expected.is_none() || self.expected == self.assigned
        }
    }

    /// Tracks, for every bind group slot, whether the bound group layout
    /// is compatible with what the current pipeline layout expects.
    #[derive(Debug)]
    pub struct Manager<T> {
        entries: [Entry<T>; hal::MAX_BIND_GROUPS],
    }

    impl<T: Copy + PartialEq> Manager<T> {
        pub fn new() -> Self {
            Self {
                entries: Default::default(),
            }
        }

        /// Range of slots, starting at `start_index`, up to (not including)
        /// the first incompatible entry.
        fn make_range(&self, start_index: usize) -> Range<usize> {
            // find first incompatible entry
            let end = self
                .entries
                .iter()
                .position(|e| e.expected.is_none() || e.assigned != e.expected)
                .unwrap_or(self.entries.len());
            start_index..end.max(start_index)
        }

        /// Install the layouts expected by a new pipeline layout; returns
        /// the range of slots whose bindings must be re-issued.
        pub fn update_expectations(&mut self, expectations: &[T]) -> Range<usize> {
            // First slot whose expectation actually changes.
            let start_index = self
                .entries
                .iter()
                .zip(expectations)
                .position(|(e, &expect)| e.expected != Some(expect))
                .unwrap_or(expectations.len());
            for (e, &expect) in self.entries[start_index..]
                .iter_mut()
                .zip(expectations[start_index..].iter())
            {
                e.expected = Some(expect);
            }
            // Slots beyond the new layout's group count become unused.
            for e in self.entries[expectations.len()..].iter_mut() {
                e.expected = None;
            }
            self.make_range(start_index)
        }

        /// Record a newly bound group layout at `index`; returns the range
        /// of now-compatible slots that must be (re)bound.
        pub fn assign(&mut self, index: usize, value: T) -> Range<usize> {
            self.entries[index].assigned = Some(value);
            self.make_range(index)
        }

        /// Indices of all slots that are both bound and used.
        pub fn list_active(&self) -> impl Iterator<Item = usize> + '_ {
            self.entries
                .iter()
                .enumerate()
                .filter_map(|(i, e)| if e.is_active() { Some(i) } else { None })
        }

        /// Bitmask with a set bit for every invalid slot.
        pub fn invalid_mask(&self) -> super::BindGroupMask {
            self.entries.iter().enumerate().fold(0, |mask, (i, entry)| {
                if entry.is_valid() {
                    mask
                } else {
                    mask | 1u8 << i
                }
            })
        }
    }

    #[test]
    fn test_compatibility() {
        let mut man = Manager::<i32>::new();
        man.entries[0] = Entry {
            expected: Some(3),
            assigned: Some(2),
        };
        man.entries[1] = Entry {
            expected: Some(1),
            assigned: Some(1),
        };
        man.entries[2] = Entry {
            expected: Some(4),
            assigned: Some(5),
        };
        // check that we rebind [1] after [0] became compatible
        assert_eq!(man.assign(0, 3), 0..2);
        // check that nothing is rebound
        assert_eq!(man.update_expectations(&[3, 2]), 1..1);
        // check that [1] and [2] are rebound on expectations change
        assert_eq!(man.update_expectations(&[3, 1, 5]), 1..3);
        // reset the first two bindings
        assert_eq!(man.update_expectations(&[4, 6, 5]), 0..0);
        // check that nothing is rebound, even if there is a match,
        // since earlier binding is incompatible.
        assert_eq!(man.assign(1, 6), 1..1);
        // finally, bind everything
        assert_eq!(man.assign(0, 4), 0..3);
    }
}
|
||||
|
||||
/// Pairing of the buffer size a shader expects for a binding with the
/// size actually bound — for bindings whose layout omitted
/// `min_binding_size`, checked late in `check_late_buffer_bindings`.
#[derive(Debug)]
struct LateBufferBinding {
    shader_expect_size: wgt::BufferAddress,
    bound_size: wgt::BufferAddress,
}
|
||||
|
||||
/// Per-slot state tracked by the [`Binder`]: which group is bound, its
/// dynamic offsets, and the late-checked buffer binding sizes.
#[derive(Debug, Default)]
pub(super) struct EntryPayload {
    pub(super) group_id: Option<Stored<BindGroupId>>,
    pub(super) dynamic_offsets: Vec<wgt::DynamicOffset>,
    late_buffer_bindings: Vec<LateBufferBinding>,
    /// Since `LateBufferBinding` may contain information about the bindings
    /// not used by the pipeline, we need to know when to stop validating.
    pub(super) late_bindings_effective_count: usize,
}
|
||||
|
||||
impl EntryPayload {
|
||||
fn reset(&mut self) {
|
||||
self.group_id = None;
|
||||
self.dynamic_offsets.clear();
|
||||
self.late_buffer_bindings.clear();
|
||||
self.late_bindings_effective_count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// Bind group state of a pass encoder: the current pipeline layout,
/// per-slot payloads, and slot-compatibility tracking.
#[derive(Debug)]
pub(super) struct Binder {
    pub(super) pipeline_layout_id: Option<Valid<PipelineLayoutId>>, //TODO: strongly `Stored`
    // Compatibility between bound group layouts and the ones expected by
    // the current pipeline layout.
    manager: compat::Manager<Valid<BindGroupLayoutId>>,
    payloads: [EntryPayload; hal::MAX_BIND_GROUPS],
}
|
||||
|
||||
impl Binder {
    pub(super) fn new() -> Self {
        Self {
            pipeline_layout_id: None,
            manager: compat::Manager::new(),
            payloads: Default::default(),
        }
    }

    /// Clear all state, as at the start of a new pass.
    pub(super) fn reset(&mut self) {
        self.pipeline_layout_id = None;
        self.manager = compat::Manager::new();
        for payload in self.payloads.iter_mut() {
            payload.reset();
        }
    }

    /// Switch to the pipeline layout `new_id`, refreshing the shader-side
    /// expected buffer sizes from `late_sized_buffer_groups`.
    ///
    /// Returns the index of the first bind group that must be re-bound,
    /// together with the payloads of the groups that stay compatible from
    /// that index on.
    pub(super) fn change_pipeline_layout<'a, A: HalApi>(
        &'a mut self,
        guard: &Storage<PipelineLayout<A>, PipelineLayoutId>,
        new_id: Valid<PipelineLayoutId>,
        late_sized_buffer_groups: &[LateSizedBufferGroup],
    ) -> (usize, &'a [EntryPayload]) {
        let old_id_opt = self.pipeline_layout_id.replace(new_id);
        let new = &guard[new_id];

        let mut bind_range = self.manager.update_expectations(&new.bind_group_layout_ids);

        // Update the buffer binding sizes that are required by shaders.
        for (payload, late_group) in self.payloads.iter_mut().zip(late_sized_buffer_groups) {
            payload.late_bindings_effective_count = late_group.shader_sizes.len();
            for (late_binding, &shader_expect_size) in payload
                .late_buffer_bindings
                .iter_mut()
                .zip(late_group.shader_sizes.iter())
            {
                late_binding.shader_expect_size = shader_expect_size;
            }
            // Grow the per-slot list if the new pipeline expects more
            // late-sized bindings than were tracked so far.
            if late_group.shader_sizes.len() > payload.late_buffer_bindings.len() {
                for &shader_expect_size in
                    late_group.shader_sizes[payload.late_buffer_bindings.len()..].iter()
                {
                    payload.late_buffer_bindings.push(LateBufferBinding {
                        shader_expect_size,
                        bound_size: 0,
                    });
                }
            }
        }

        if let Some(old_id) = old_id_opt {
            let old = &guard[old_id];
            // root constants are the base compatibility property
            if old.push_constant_ranges != new.push_constant_ranges {
                bind_range.start = 0;
            }
        }

        (bind_range.start, &self.payloads[bind_range])
    }

    /// Record `bind_group` in slot `index`. Returns the payloads of the
    /// slots that became compatible and whose bindings must be re-issued.
    pub(super) fn assign_group<'a, A: HalApi>(
        &'a mut self,
        index: usize,
        bind_group_id: Valid<BindGroupId>,
        bind_group: &BindGroup<A>,
        offsets: &[wgt::DynamicOffset],
    ) -> &'a [EntryPayload] {
        log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
        debug_assert_eq!(A::VARIANT, bind_group_id.0.backend());

        let payload = &mut self.payloads[index];
        payload.group_id = Some(Stored {
            value: bind_group_id,
            ref_count: bind_group.life_guard.add_ref(),
        });
        payload.dynamic_offsets.clear();
        payload.dynamic_offsets.extend_from_slice(offsets);

        // Fill out the actual binding sizes for buffers,
        // whose layout doesn't specify `min_binding_size`.
        for (late_binding, late_size) in payload
            .late_buffer_bindings
            .iter_mut()
            .zip(bind_group.late_buffer_binding_sizes.iter())
        {
            late_binding.bound_size = late_size.get();
        }
        // Grow the per-slot list if this group supplies more late-sized
        // bindings than were tracked so far.
        if bind_group.late_buffer_binding_sizes.len() > payload.late_buffer_bindings.len() {
            for late_size in
                bind_group.late_buffer_binding_sizes[payload.late_buffer_bindings.len()..].iter()
            {
                payload.late_buffer_bindings.push(LateBufferBinding {
                    shader_expect_size: 0,
                    bound_size: late_size.get(),
                });
            }
        }

        let bind_range = self.manager.assign(index, bind_group.layout_id);
        &self.payloads[bind_range]
    }

    /// Ids of the bind groups in every slot that is bound and in use.
    pub(super) fn list_active(&self) -> impl Iterator<Item = Valid<BindGroupId>> + '_ {
        let payloads = &self.payloads;
        self.manager
            .list_active()
            // Active slots always have a group assigned, so `unwrap` is safe.
            .map(move |index| payloads[index].group_id.as_ref().unwrap().value)
    }

    /// Bitmask of slots whose bound layout conflicts with the pipeline's.
    pub(super) fn invalid_mask(&self) -> BindGroupMask {
        self.manager.invalid_mask()
    }

    /// Scan active buffer bindings corresponding to layouts without `min_binding_size` specified.
    pub(super) fn check_late_buffer_bindings(
        &self,
    ) -> Result<(), LateMinBufferBindingSizeMismatch> {
        for group_index in self.manager.list_active() {
            let payload = &self.payloads[group_index];
            for (compact_index, late_binding) in payload.late_buffer_bindings
                [..payload.late_bindings_effective_count]
                .iter()
                .enumerate()
            {
                // The bound buffer must be at least as large as the shader expects.
                if late_binding.bound_size < late_binding.shader_expect_size {
                    return Err(LateMinBufferBindingSizeMismatch {
                        group_index: group_index as u32,
                        compact_index,
                        shader_size: late_binding.shader_expect_size,
                        bound_size: late_binding.bound_size,
                    });
                }
            }
        }
        Ok(())
    }
}
|
||||
|
||||
/// One endpoint of a push constant range in the sweep performed by
/// `compute_nonoverlapping_ranges`: at `offset`, `stages` either start
/// (`enable == true`) or stop applying.
struct PushConstantChange {
    stages: wgt::ShaderStages,
    offset: u32,
    enable: bool,
}
|
||||
|
||||
/// Break up possibly overlapping push constant ranges into a set of
|
||||
/// non-overlapping ranges which contain all the stage flags of the
|
||||
/// original ranges. This allows us to zero out (or write any value)
|
||||
/// to every possible value.
|
||||
pub fn compute_nonoverlapping_ranges(
|
||||
ranges: &[wgt::PushConstantRange],
|
||||
) -> ArrayVec<wgt::PushConstantRange, { SHADER_STAGE_COUNT * 2 }> {
|
||||
if ranges.is_empty() {
|
||||
return ArrayVec::new();
|
||||
}
|
||||
debug_assert!(ranges.len() <= SHADER_STAGE_COUNT);
|
||||
|
||||
let mut breaks: ArrayVec<PushConstantChange, { SHADER_STAGE_COUNT * 2 }> = ArrayVec::new();
|
||||
for range in ranges {
|
||||
breaks.push(PushConstantChange {
|
||||
stages: range.stages,
|
||||
offset: range.range.start,
|
||||
enable: true,
|
||||
});
|
||||
breaks.push(PushConstantChange {
|
||||
stages: range.stages,
|
||||
offset: range.range.end,
|
||||
enable: false,
|
||||
});
|
||||
}
|
||||
breaks.sort_unstable_by_key(|change| change.offset);
|
||||
|
||||
let mut output_ranges = ArrayVec::new();
|
||||
let mut position = 0_u32;
|
||||
let mut stages = wgt::ShaderStages::NONE;
|
||||
|
||||
for bk in breaks {
|
||||
if bk.offset - position > 0 && !stages.is_empty() {
|
||||
output_ranges.push(wgt::PushConstantRange {
|
||||
stages,
|
||||
range: position..bk.offset,
|
||||
})
|
||||
}
|
||||
position = bk.offset;
|
||||
stages.set(bk.stages, bk.enable);
|
||||
}
|
||||
|
||||
output_ranges
|
||||
}
|
||||
1668
third-party/vendor/wgpu-core/src/command/bundle.rs
vendored
Normal file
1668
third-party/vendor/wgpu-core/src/command/bundle.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
461
third-party/vendor/wgpu-core/src/command/clear.rs
vendored
Normal file
461
third-party/vendor/wgpu-core/src/command/clear.rs
vendored
Normal file
|
|
@ -0,0 +1,461 @@
|
|||
use std::ops::Range;
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
use crate::device::trace::Command as TraceCommand;
|
||||
use crate::{
|
||||
command::CommandBuffer,
|
||||
get_lowest_common_denom,
|
||||
global::Global,
|
||||
hal_api::HalApi,
|
||||
hub::Token,
|
||||
id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid},
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
init_tracker::{MemoryInitKind, TextureInitRange},
|
||||
resource::{Texture, TextureClearMode},
|
||||
storage,
|
||||
track::{TextureSelector, TextureTracker},
|
||||
};
|
||||
|
||||
use hal::CommandEncoder as _;
|
||||
use thiserror::Error;
|
||||
use wgt::{
|
||||
math::align_to, BufferAddress, BufferSize, BufferUsages, ImageSubresourceRange, TextureAspect,
|
||||
};
|
||||
|
||||
/// Error encountered while attempting a clear.
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum ClearError {
|
||||
#[error("To use clear_texture the CLEAR_TEXTURE feature needs to be enabled")]
|
||||
MissingClearTextureFeature,
|
||||
#[error("Command encoder {0:?} is invalid")]
|
||||
InvalidCommandEncoder(CommandEncoderId),
|
||||
#[error("Device {0:?} is invalid")]
|
||||
InvalidDevice(DeviceId),
|
||||
#[error("Buffer {0:?} is invalid or destroyed")]
|
||||
InvalidBuffer(BufferId),
|
||||
#[error("Texture {0:?} is invalid or destroyed")]
|
||||
InvalidTexture(TextureId),
|
||||
#[error("Texture {0:?} can not be cleared")]
|
||||
NoValidTextureClearMode(TextureId),
|
||||
#[error("Buffer clear size {0:?} is not a multiple of `COPY_BUFFER_ALIGNMENT`")]
|
||||
UnalignedFillSize(BufferSize),
|
||||
#[error("Buffer offset {0:?} is not a multiple of `COPY_BUFFER_ALIGNMENT`")]
|
||||
UnalignedBufferOffset(BufferAddress),
|
||||
#[error("Clear of {start_offset}..{end_offset} would end up overrunning the bounds of the buffer of size {buffer_size}")]
|
||||
BufferOverrun {
|
||||
start_offset: BufferAddress,
|
||||
end_offset: BufferAddress,
|
||||
buffer_size: BufferAddress,
|
||||
},
|
||||
#[error("Destination buffer is missing the `COPY_DST` usage flag")]
|
||||
MissingCopyDstUsageFlag(Option<BufferId>, Option<TextureId>),
|
||||
#[error("Texture lacks the aspects that were specified in the image subresource range. Texture with format {texture_format:?}, specified was {subresource_range_aspects:?}")]
|
||||
MissingTextureAspect {
|
||||
texture_format: wgt::TextureFormat,
|
||||
subresource_range_aspects: TextureAspect,
|
||||
},
|
||||
#[error("Image subresource level range is outside of the texture's level range. texture range is {texture_level_range:?}, \
|
||||
whereas subesource range specified start {subresource_base_mip_level} and count {subresource_mip_level_count:?}")]
|
||||
InvalidTextureLevelRange {
|
||||
texture_level_range: Range<u32>,
|
||||
subresource_base_mip_level: u32,
|
||||
subresource_mip_level_count: Option<u32>,
|
||||
},
|
||||
#[error("Image subresource layer range is outside of the texture's layer range. texture range is {texture_layer_range:?}, \
|
||||
whereas subesource range specified start {subresource_base_array_layer} and count {subresource_array_layer_count:?}")]
|
||||
InvalidTextureLayerRange {
|
||||
texture_layer_range: Range<u32>,
|
||||
subresource_base_array_layer: u32,
|
||||
subresource_array_layer_count: Option<u32>,
|
||||
},
|
||||
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
    /// Record a `fill_buffer`-style clear of `dst` into the command encoder.
    ///
    /// Validates usage flags, alignment and bounds, marks the cleared range as
    /// initialized, then records the hal barrier and clear command. A
    /// zero-sized clear is silently ignored.
    pub fn command_encoder_clear_buffer<A: HalApi>(
        &self,
        command_encoder_id: CommandEncoderId,
        dst: BufferId,
        offset: BufferAddress,
        size: Option<BufferSize>,
    ) -> Result<(), ClearError> {
        profiling::scope!("CommandEncoder::fill_buffer");

        // NOTE(review): the guards below are acquired via chained tokens, so
        // the lock order (command_buffers, then buffers) is significant —
        // do not reorder.
        let hub = A::hub(self);
        let mut token = Token::root();
        let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
        let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)
            .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?;
        let (buffer_guard, _) = hub.buffers.read(&mut token);

        // Record the command into the trace before any validation that could
        // bail out, mirroring what the user asked for.
        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.commands {
            list.push(TraceCommand::ClearBuffer { dst, offset, size });
        }

        // Transition the destination buffer to COPY_DST and fetch its raw
        // handle; both failures map to InvalidBuffer.
        let (dst_buffer, dst_pending) = cmd_buf
            .trackers
            .buffers
            .set_single(&*buffer_guard, dst, hal::BufferUses::COPY_DST)
            .ok_or(ClearError::InvalidBuffer(dst))?;
        let dst_raw = dst_buffer
            .raw
            .as_ref()
            .ok_or(ClearError::InvalidBuffer(dst))?;
        if !dst_buffer.usage.contains(BufferUsages::COPY_DST) {
            return Err(ClearError::MissingCopyDstUsageFlag(Some(dst), None));
        }

        // Check if offset & size are valid.
        if offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(ClearError::UnalignedBufferOffset(offset));
        }
        if let Some(size) = size {
            if size.get() % wgt::COPY_BUFFER_ALIGNMENT != 0 {
                return Err(ClearError::UnalignedFillSize(size));
            }
            let destination_end_offset = offset + size.get();
            if destination_end_offset > dst_buffer.size {
                return Err(ClearError::BufferOverrun {
                    start_offset: offset,
                    end_offset: destination_end_offset,
                    buffer_size: dst_buffer.size,
                });
            }
        }

        // `size == None` means "to the end of the buffer".
        let end = match size {
            Some(size) => offset + size.get(),
            None => dst_buffer.size,
        };
        if offset == end {
            log::trace!("Ignoring fill_buffer of size 0");
            return Ok(());
        }

        // Mark dest as initialized.
        cmd_buf
            .buffer_memory_init_actions
            .extend(dst_buffer.initialization_status.create_action(
                dst,
                offset..end,
                MemoryInitKind::ImplicitlyInitialized,
            ));
        // actual hal barrier & operation
        let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
        let cmd_buf_raw = cmd_buf.encoder.open();
        unsafe {
            cmd_buf_raw.transition_buffers(dst_barrier.into_iter());
            cmd_buf_raw.clear_buffer(dst_raw, offset..end);
        }
        Ok(())
    }

    /// Record a clear of the given subresource range of texture `dst`.
    ///
    /// Requires the `CLEAR_TEXTURE` feature. Validates that the requested
    /// aspects, mip levels and array layers all exist on the texture, then
    /// delegates the actual encoding to the free function `clear_texture`.
    pub fn command_encoder_clear_texture<A: HalApi>(
        &self,
        command_encoder_id: CommandEncoderId,
        dst: TextureId,
        subresource_range: &ImageSubresourceRange,
    ) -> Result<(), ClearError> {
        profiling::scope!("CommandEncoder::clear_texture");

        // NOTE(review): token chaining again fixes the lock order here
        // (devices -> command_buffers -> buffers -> textures); do not reorder.
        let hub = A::hub(self);
        let mut token = Token::root();
        let (device_guard, mut token) = hub.devices.write(&mut token);
        let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
        let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)
            .map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?;
        let (_, mut token) = hub.buffers.read(&mut token); // skip token
        let (texture_guard, _) = hub.textures.read(&mut token);

        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.commands {
            list.push(TraceCommand::ClearTexture {
                dst,
                subresource_range: *subresource_range,
            });
        }

        if !cmd_buf.support_clear_texture {
            return Err(ClearError::MissingClearTextureFeature);
        }

        let dst_texture = texture_guard
            .get(dst)
            .map_err(|_| ClearError::InvalidTexture(dst))?;

        // Check if subresource aspects are valid.
        let clear_aspects =
            hal::FormatAspects::new(dst_texture.desc.format, subresource_range.aspect);
        if clear_aspects.is_empty() {
            return Err(ClearError::MissingTextureAspect {
                texture_format: dst_texture.desc.format,
                subresource_range_aspects: subresource_range.aspect,
            });
        };

        // Check if subresource level range is valid
        let subresource_mip_range = subresource_range.mip_range(dst_texture.full_range.mips.end);
        if dst_texture.full_range.mips.start > subresource_mip_range.start
            || dst_texture.full_range.mips.end < subresource_mip_range.end
        {
            return Err(ClearError::InvalidTextureLevelRange {
                texture_level_range: dst_texture.full_range.mips.clone(),
                subresource_base_mip_level: subresource_range.base_mip_level,
                subresource_mip_level_count: subresource_range.mip_level_count,
            });
        }
        // Check if subresource layer range is valid
        let subresource_layer_range =
            subresource_range.layer_range(dst_texture.full_range.layers.end);
        if dst_texture.full_range.layers.start > subresource_layer_range.start
            || dst_texture.full_range.layers.end < subresource_layer_range.end
        {
            return Err(ClearError::InvalidTextureLayerRange {
                texture_layer_range: dst_texture.full_range.layers.clone(),
                subresource_base_array_layer: subresource_range.base_array_layer,
                subresource_array_layer_count: subresource_range.array_layer_count,
            });
        }

        let device = &device_guard[cmd_buf.device_id.value];

        clear_texture(
            &*texture_guard,
            Valid(dst),
            TextureInitRange {
                mip_range: subresource_mip_range,
                layer_range: subresource_layer_range,
            },
            cmd_buf.encoder.open(),
            &mut cmd_buf.trackers.textures,
            &device.alignments,
            &device.zero_buffer,
        )
    }
}
|
||||
|
||||
/// Encode a clear of `range` of the given texture into `encoder`.
///
/// Picks the clear strategy from the texture's `clear_mode`: either copying
/// from the device's zero buffer, or issuing LOAD=CLEAR render passes.
/// Issues the required hal transition barrier first. Errors if the texture is
/// destroyed or has `TextureClearMode::None`.
pub(crate) fn clear_texture<A: HalApi>(
    storage: &storage::Storage<Texture<A>, TextureId>,
    dst_texture_id: Valid<TextureId>,
    range: TextureInitRange,
    encoder: &mut A::CommandEncoder,
    texture_tracker: &mut TextureTracker<A>,
    alignments: &hal::Alignments,
    zero_buffer: &A::Buffer,
) -> Result<(), ClearError> {
    let dst_texture = &storage[dst_texture_id];

    let dst_raw = dst_texture
        .inner
        .as_raw()
        .ok_or(ClearError::InvalidTexture(dst_texture_id.0))?;

    // Issue the right barrier.
    let clear_usage = match dst_texture.clear_mode {
        TextureClearMode::BufferCopy => hal::TextureUses::COPY_DST,
        TextureClearMode::RenderPass {
            is_color: false, ..
        } => hal::TextureUses::DEPTH_STENCIL_WRITE,
        TextureClearMode::RenderPass { is_color: true, .. } => hal::TextureUses::COLOR_TARGET,
        TextureClearMode::None => {
            return Err(ClearError::NoValidTextureClearMode(dst_texture_id.0));
        }
    };

    let selector = TextureSelector {
        mips: range.mip_range.clone(),
        layers: range.layer_range.clone(),
    };

    // If we're in a texture-init usecase, we know that the texture is already
    // tracked since whatever caused the init requirement, will have caused the
    // usage tracker to be aware of the texture. Meaning, that it is safe to
    // call call change_replace_tracked if the life_guard is already gone (i.e.
    // the user no longer holds on to this texture).
    //
    // On the other hand, when coming via command_encoder_clear_texture, the
    // life_guard is still there since in order to call it a texture object is
    // needed.
    //
    // We could in theory distinguish these two scenarios in the internal
    // clear_texture api in order to remove this check and call the cheaper
    // change_replace_tracked whenever possible.
    let dst_barrier = texture_tracker
        .set_single(dst_texture, dst_texture_id.0, selector, clear_usage)
        .unwrap()
        .map(|pending| pending.into_hal(dst_texture));
    unsafe {
        encoder.transition_textures(dst_barrier.into_iter());
    }

    // Record actual clearing
    match dst_texture.clear_mode {
        TextureClearMode::BufferCopy => clear_texture_via_buffer_copies::<A>(
            &dst_texture.desc,
            alignments,
            zero_buffer,
            range,
            encoder,
            dst_raw,
        ),
        TextureClearMode::RenderPass { is_color, .. } => {
            clear_texture_via_render_passes(dst_texture, range, is_color, encoder)?
        }
        // Unreachable in practice: the usage match above already rejected
        // `None`, but the second match must still be exhaustive.
        TextureClearMode::None => {
            return Err(ClearError::NoValidTextureClearMode(dst_texture_id.0));
        }
    }
    Ok(())
}
|
||||
|
||||
/// Clear a (color-only) texture by copying from the device zero buffer.
///
/// Builds one `BufferTextureCopy` region per (mip, layer, depth-slice,
/// row-batch) and submits them in a single `copy_buffer_to_texture` call.
/// Rows are batched because the zero buffer (`ZERO_BUFFER_SIZE`) may be
/// smaller than a whole subresource; a single row is assumed to always fit.
fn clear_texture_via_buffer_copies<A: hal::Api>(
    texture_desc: &wgt::TextureDescriptor<(), Vec<wgt::TextureFormat>>,
    alignments: &hal::Alignments,
    zero_buffer: &A::Buffer, // Buffer of size device::ZERO_BUFFER_SIZE
    range: TextureInitRange,
    encoder: &mut A::CommandEncoder,
    dst_raw: &A::Texture,
) {
    // This path only supports color textures (depth/stencil use render passes).
    assert_eq!(
        hal::FormatAspects::from(texture_desc.format),
        hal::FormatAspects::COLOR
    );

    // Gather list of zero_buffer copies and issue a single command then to perform them
    let mut zero_buffer_copy_regions = Vec::new();
    let buffer_copy_pitch = alignments.buffer_copy_pitch.get() as u32;
    let (block_width, block_height) = texture_desc.format.block_dimensions();
    let block_size = texture_desc.format.block_size(None).unwrap();

    // Row pitch must satisfy both the hal copy-pitch alignment and the
    // format's block size.
    let bytes_per_row_alignment = get_lowest_common_denom(buffer_copy_pitch, block_size);

    for mip_level in range.mip_range {
        let mut mip_size = texture_desc.mip_level_size(mip_level).unwrap();
        // Round to multiple of block size
        mip_size.width = align_to(mip_size.width, block_width);
        mip_size.height = align_to(mip_size.height, block_height);

        let bytes_per_row = align_to(
            mip_size.width / block_width * block_size,
            bytes_per_row_alignment,
        );

        let max_rows_per_copy = crate::device::ZERO_BUFFER_SIZE as u32 / bytes_per_row;
        // round down to a multiple of rows needed by the texture format
        let max_rows_per_copy = max_rows_per_copy / block_height * block_height;
        assert!(
            max_rows_per_copy > 0,
            "Zero buffer size is too small to fill a single row \
            of a texture with format {:?} and desc {:?}",
            texture_desc.format,
            texture_desc.size
        );

        // For 3D textures the third extent dimension is depth slices; for
        // 2D arrays it is handled via `array_layer` instead, so z stays 0.
        let z_range = 0..(if texture_desc.dimension == wgt::TextureDimension::D3 {
            mip_size.depth_or_array_layers
        } else {
            1
        });

        for array_layer in range.layer_range.clone() {
            // TODO: Only doing one layer at a time for volume textures right now.
            for z in z_range.clone() {
                // May need multiple copies for each subresource! However, we
                // assume that we never need to split a row.
                let mut num_rows_left = mip_size.height;
                while num_rows_left > 0 {
                    let num_rows = num_rows_left.min(max_rows_per_copy);

                    zero_buffer_copy_regions.push(hal::BufferTextureCopy {
                        buffer_layout: wgt::ImageDataLayout {
                            offset: 0,
                            bytes_per_row: Some(bytes_per_row),
                            rows_per_image: None,
                        },
                        texture_base: hal::TextureCopyBase {
                            mip_level,
                            array_layer,
                            origin: wgt::Origin3d {
                                x: 0, // Always full rows
                                y: mip_size.height - num_rows_left,
                                z,
                            },
                            aspect: hal::FormatAspects::COLOR,
                        },
                        size: hal::CopyExtent {
                            width: mip_size.width, // full row
                            height: num_rows,
                            depth: 1, // Only single slice of volume texture at a time right now
                        },
                    });

                    num_rows_left -= num_rows;
                }
            }
        }
    }

    unsafe {
        encoder.copy_buffer_to_texture(zero_buffer, dst_raw, zero_buffer_copy_regions.into_iter());
    }
}
|
||||
|
||||
/// Clear a 2D texture by running one render pass per (mip level, layer) with
/// LOAD=CLEAR semantics: transparent black for color targets, (0.0, 0) for
/// depth/stencil. Only D2 textures are supported (asserted).
fn clear_texture_via_render_passes<A: hal::Api>(
    dst_texture: &Texture<A>,
    range: TextureInitRange,
    is_color: bool,
    encoder: &mut A::CommandEncoder,
) -> Result<(), ClearError> {
    assert_eq!(dst_texture.desc.dimension, wgt::TextureDimension::D2);

    let extent_base = wgt::Extent3d {
        width: dst_texture.desc.size.width,
        height: dst_texture.desc.size.height,
        depth_or_array_layers: 1, // Only one layer is cleared at a time.
    };

    for mip_level in range.mip_range {
        let extent = extent_base.mip_level_size(mip_level, dst_texture.desc.dimension);
        for depth_or_layer in range.layer_range.clone() {
            // `color_attachments_tmp` lives outside the `if` so the slice
            // borrowed below outlives the branch.
            let color_attachments_tmp;
            let (color_attachments, depth_stencil_attachment) = if is_color {
                color_attachments_tmp = [Some(hal::ColorAttachment {
                    target: hal::Attachment {
                        view: dst_texture.get_clear_view(mip_level, depth_or_layer),
                        usage: hal::TextureUses::COLOR_TARGET,
                    },
                    resolve_target: None,
                    ops: hal::AttachmentOps::STORE,
                    clear_value: wgt::Color::TRANSPARENT,
                })];
                (&color_attachments_tmp[..], None)
            } else {
                (
                    &[][..],
                    Some(hal::DepthStencilAttachment {
                        target: hal::Attachment {
                            view: dst_texture.get_clear_view(mip_level, depth_or_layer),
                            usage: hal::TextureUses::DEPTH_STENCIL_WRITE,
                        },
                        depth_ops: hal::AttachmentOps::STORE,
                        stencil_ops: hal::AttachmentOps::STORE,
                        clear_value: (0.0, 0),
                    }),
                )
            };
            unsafe {
                encoder.begin_render_pass(&hal::RenderPassDescriptor {
                    label: Some("(wgpu internal) clear_texture clear pass"),
                    extent,
                    sample_count: dst_texture.desc.sample_count,
                    color_attachments,
                    depth_stencil_attachment,
                    multiview: None,
                });
                encoder.end_render_pass();
            }
        }
    }
    Ok(())
}
|
||||
992
third-party/vendor/wgpu-core/src/command/compute.rs
vendored
Normal file
992
third-party/vendor/wgpu-core/src/command/compute.rs
vendored
Normal file
|
|
@ -0,0 +1,992 @@
|
|||
use crate::{
|
||||
binding_model::{
|
||||
BindError, BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError,
|
||||
},
|
||||
command::{
|
||||
bind::Binder,
|
||||
end_pipeline_statistics_query,
|
||||
memory_init::{fixup_discarded_surfaces, SurfacesInDiscardState},
|
||||
BasePass, BasePassRef, BindGroupStateChange, CommandBuffer, CommandEncoderError,
|
||||
CommandEncoderStatus, MapPassErr, PassErrorScope, QueryUseError, StateChange,
|
||||
},
|
||||
device::{MissingDownlevelFlags, MissingFeatures},
|
||||
error::{ErrorFormatter, PrettyError},
|
||||
global::Global,
|
||||
hal_api::HalApi,
|
||||
hub::Token,
|
||||
id,
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
init_tracker::MemoryInitKind,
|
||||
pipeline,
|
||||
resource::{self, Buffer, Texture},
|
||||
storage::Storage,
|
||||
track::{Tracker, UsageConflict, UsageScope},
|
||||
validation::{check_buffer_usage, MissingBufferUsageError},
|
||||
Label,
|
||||
};
|
||||
|
||||
use hal::CommandEncoder as _;
|
||||
use thiserror::Error;
|
||||
|
||||
use std::{fmt, mem, str};
|
||||
|
||||
/// A single recorded command of a compute pass, replayed by
/// `command_encoder_run_compute_pass_impl`.
#[doc(hidden)]
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "trace"),
    derive(serde::Serialize)
)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "replay"),
    derive(serde::Deserialize)
)]
pub enum ComputeCommand {
    /// Bind `bind_group_id` at `index`, consuming `num_dynamic_offsets`
    /// entries from `BasePass::dynamic_offsets`.
    SetBindGroup {
        index: u32,
        num_dynamic_offsets: u8,
        bind_group_id: id::BindGroupId,
    },
    /// Make the given pipeline current.
    SetPipeline(id::ComputePipelineId),

    /// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
    SetPushConstant {
        /// The byte offset within the push constant storage to write to. This
        /// must be a multiple of four.
        offset: u32,

        /// The number of bytes to write. This must be a multiple of four.
        size_bytes: u32,

        /// Index in [`BasePass::push_constant_data`] of the start of the data
        /// to be written.
        ///
        /// Note: this is not a byte offset like `offset`. Rather, it is the
        /// index of the first `u32` element in `push_constant_data` to read.
        values_offset: u32,
    },

    /// Dispatch the given number of workgroups in x, y, z.
    Dispatch([u32; 3]),
    /// Dispatch with workgroup counts read from `buffer_id` at `offset`.
    DispatchIndirect {
        buffer_id: id::BufferId,
        offset: wgt::BufferAddress,
    },
    /// Open a debug group; the label is the next `len` bytes of
    /// `BasePass::string_data`.
    PushDebugGroup {
        color: u32,
        len: usize,
    },
    /// Close the innermost debug group.
    PopDebugGroup,
    /// Insert a debug marker; the label is the next `len` bytes of
    /// `BasePass::string_data`.
    InsertDebugMarker {
        color: u32,
        len: usize,
    },
    /// Write a timestamp into the given query slot.
    WriteTimestamp {
        query_set_id: id::QuerySetId,
        query_index: u32,
    },
    /// Begin a pipeline-statistics query into the given slot.
    BeginPipelineStatisticsQuery {
        query_set_id: id::QuerySetId,
        query_index: u32,
    },
    /// End the currently active pipeline-statistics query.
    EndPipelineStatisticsQuery,
}
|
||||
|
||||
/// A recorded (not yet executed) compute pass: the command list plus
/// client-side state used to deduplicate redundant bindings.
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub struct ComputePass {
    // The recorded commands, dynamic offsets, string and push-constant data.
    base: BasePass<ComputeCommand>,
    // The encoder this pass will be replayed into.
    parent_id: id::CommandEncoderId,

    // Resource binding dedupe state.
    #[cfg_attr(feature = "serial-pass", serde(skip))]
    current_bind_groups: BindGroupStateChange,
    #[cfg_attr(feature = "serial-pass", serde(skip))]
    current_pipeline: StateChange<id::ComputePipelineId>,
}
|
||||
|
||||
impl ComputePass {
|
||||
pub fn new(parent_id: id::CommandEncoderId, desc: &ComputePassDescriptor) -> Self {
|
||||
Self {
|
||||
base: BasePass::new(&desc.label),
|
||||
parent_id,
|
||||
|
||||
current_bind_groups: BindGroupStateChange::new(),
|
||||
current_pipeline: StateChange::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parent_id(&self) -> id::CommandEncoderId {
|
||||
self.parent_id
|
||||
}
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
pub fn into_command(self) -> crate::device::trace::Command {
|
||||
crate::device::trace::Command::RunComputePass { base: self.base }
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for ComputePass {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"ComputePass {{ encoder_id: {:?}, data: {:?} commands and {:?} dynamic offsets }}",
|
||||
self.parent_id,
|
||||
self.base.commands.len(),
|
||||
self.base.dynamic_offsets.len()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Creation parameters for a [`ComputePass`]; currently only a debug label.
#[derive(Clone, Debug, Default)]
pub struct ComputePassDescriptor<'a> {
    pub label: Label<'a>,
}
|
||||
|
||||
/// Validation failure raised at dispatch time (see `State::is_ready`).
#[derive(Clone, Debug, Error, Eq, PartialEq)]
#[non_exhaustive]
pub enum DispatchError {
    #[error("Compute pipeline must be set")]
    MissingPipeline,
    /// The bind group at `index` doesn't match the current pipeline's layout.
    #[error("The pipeline layout, associated with the current compute pipeline, contains a bind group layout at index {index} which is incompatible with the bind group layout associated with the bind group at {index}")]
    IncompatibleBindGroup {
        index: u32,
        //expected: BindGroupLayoutId,
        //provided: Option<(BindGroupLayoutId, BindGroupId)>,
    },
    #[error(
        "Each current dispatch group size dimension ({current:?}) must be less or equal to {limit}"
    )]
    InvalidGroupSize { current: [u32; 3], limit: u32 },
    /// A runtime-sized buffer binding is smaller than the shader requires.
    #[error(transparent)]
    BindingSizeTooSmall(#[from] LateMinBufferBindingSizeMismatch),
}
|
||||
|
||||
/// Error encountered when performing a compute pass.
#[derive(Clone, Debug, Error)]
pub enum ComputePassErrorInner {
    #[error(transparent)]
    Encoder(#[from] CommandEncoderError),
    #[error("Bind group {0:?} is invalid")]
    InvalidBindGroup(id::BindGroupId),
    #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
    BindGroupIndexOutOfRange { index: u32, max: u32 },
    #[error("Compute pipeline {0:?} is invalid")]
    InvalidPipeline(id::ComputePipelineId),
    #[error("QuerySet {0:?} is invalid")]
    InvalidQuerySet(id::QuerySetId),
    #[error("Indirect buffer {0:?} is invalid or destroyed")]
    InvalidIndirectBuffer(id::BufferId),
    /// The indirect dispatch arguments extend past the end of the buffer.
    #[error("Indirect buffer uses bytes {offset}..{end_offset} which overruns indirect buffer of size {buffer_size}")]
    IndirectBufferOverrun {
        offset: u64,
        end_offset: u64,
        buffer_size: u64,
    },
    #[error("Buffer {0:?} is invalid or destroyed")]
    InvalidBuffer(id::BufferId),
    #[error(transparent)]
    ResourceUsageConflict(#[from] UsageConflict),
    #[error(transparent)]
    MissingBufferUsage(#[from] MissingBufferUsageError),
    #[error("Cannot pop debug group, because number of pushed debug groups is zero")]
    InvalidPopDebugGroup,
    #[error(transparent)]
    Dispatch(#[from] DispatchError),
    #[error(transparent)]
    Bind(#[from] BindError),
    #[error(transparent)]
    PushConstants(#[from] PushConstantUploadError),
    #[error(transparent)]
    QueryUse(#[from] QueryUseError),
    #[error(transparent)]
    MissingFeatures(#[from] MissingFeatures),
    #[error(transparent)]
    MissingDownlevelFlags(#[from] MissingDownlevelFlags),
}
|
||||
|
||||
impl PrettyError for ComputePassErrorInner {
    /// Print the error, attaching the user-assigned label of the offending
    /// resource for the variants that carry a resource id.
    fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
        fmt.error(self);
        match *self {
            Self::InvalidBindGroup(id) => {
                fmt.bind_group_label(&id);
            }
            Self::InvalidPipeline(id) => {
                fmt.compute_pipeline_label(&id);
            }
            Self::InvalidIndirectBuffer(id) => {
                fmt.buffer_label(&id);
            }
            // Other variants carry no labelable resource id.
            _ => {}
        };
    }
}
|
||||
|
||||
/// Error encountered when performing a compute pass.
///
/// Pairs the inner error with the [`PassErrorScope`] (the command being
/// processed) at which it occurred; `Display` shows the scope, `source()`
/// the inner error.
#[derive(Clone, Debug, Error)]
#[error("{scope}")]
pub struct ComputePassError {
    pub scope: PassErrorScope,
    #[source]
    inner: ComputePassErrorInner,
}
|
||||
impl PrettyError for ComputePassError {
    /// Pretty-print the wrapper, then let the scope add its labels.
    fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
        // This error is wrapper for the inner error,
        // but the scope has useful labels
        fmt.error(self);
        self.scope.fmt_pretty(fmt);
    }
}
|
||||
|
||||
// Blanket helper: attach a `PassErrorScope` to any error convertible into
// `ComputePassErrorInner`, producing a full `ComputePassError`.
impl<T, E> MapPassErr<T, ComputePassError> for Result<T, E>
where
    E: Into<ComputePassErrorInner>,
{
    fn map_pass_err(self, scope: PassErrorScope) -> Result<T, ComputePassError> {
        self.map_err(|inner| ComputePassError {
            scope,
            inner: inner.into(),
        })
    }
}
|
||||
|
||||
/// Mutable validation/tracking state carried while replaying a compute pass.
struct State<A: HalApi> {
    // Currently bound bind groups and their late-binding bookkeeping.
    binder: Binder,
    // Currently bound pipeline, if any.
    pipeline: Option<id::ComputePipelineId>,
    // Resource usage accumulated since the last flush.
    scope: UsageScope<A>,
    // Depth of open Push/PopDebugGroup pairs, to reject unbalanced pops.
    debug_scope_depth: u32,
}
|
||||
|
||||
impl<A: HalApi> State<A> {
    /// Check that a dispatch may proceed: all bind group slots valid, a
    /// pipeline bound, and every late (runtime-sized) buffer binding large
    /// enough for the shader.
    fn is_ready(&self) -> Result<(), DispatchError> {
        let bind_mask = self.binder.invalid_mask();
        if bind_mask != 0 {
            //let (expected, provided) = self.binder.entries[index as usize].info();
            // Report the lowest invalid slot.
            return Err(DispatchError::IncompatibleBindGroup {
                index: bind_mask.trailing_zeros(),
            });
        }
        if self.pipeline.is_none() {
            return Err(DispatchError::MissingPipeline);
        }
        self.binder.check_late_buffer_bindings()?;

        Ok(())
    }

    // `extra_buffer` is there to represent the indirect buffer that is also
    // part of the usage scope.
    //
    // Merges the usage of all active bind groups (plus the optional indirect
    // buffer) into `base_trackers` and encodes the resulting transition
    // barriers. Fails on conflicting usages within the scope.
    fn flush_states(
        &mut self,
        raw_encoder: &mut A::CommandEncoder,
        base_trackers: &mut Tracker<A>,
        bind_group_guard: &Storage<BindGroup<A>, id::BindGroupId>,
        buffer_guard: &Storage<Buffer<A>, id::BufferId>,
        texture_guard: &Storage<Texture<A>, id::TextureId>,
        indirect_buffer: Option<id::Valid<id::BufferId>>,
    ) -> Result<(), UsageConflict> {
        // First pass: merge all bind-group usages into the scope so
        // conflicts are detected before any tracker state is mutated.
        for id in self.binder.list_active() {
            unsafe {
                self.scope
                    .merge_bind_group(texture_guard, &bind_group_guard[id].used)?
            };
            // Note: stateless trackers are not merged: the lifetime reference
            // is held to the bind group itself.
        }

        // Second pass: move the merged states out of the scope into the
        // command buffer's trackers.
        for id in self.binder.list_active() {
            unsafe {
                base_trackers.set_and_remove_from_usage_scope_sparse(
                    texture_guard,
                    &mut self.scope,
                    &bind_group_guard[id].used,
                )
            }
        }

        // Add the state of the indirect buffer if it hasn't been hit before.
        unsafe {
            base_trackers
                .buffers
                .set_and_remove_from_usage_scope_sparse(&mut self.scope.buffers, indirect_buffer);
        }

        log::trace!("Encoding dispatch barriers");

        CommandBuffer::drain_barriers(raw_encoder, base_trackers, buffer_guard, texture_guard);
        Ok(())
    }
}
|
||||
|
||||
// Common routines between render/compute
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
    /// Record a finished [`ComputePass`] into the command encoder identified by
    /// `encoder_id`.
    ///
    /// Thin public wrapper: borrows the pass's recorded command stream and
    /// forwards it to [`Self::command_encoder_run_compute_pass_impl`].
    pub fn command_encoder_run_compute_pass<A: HalApi>(
        &self,
        encoder_id: id::CommandEncoderId,
        pass: &ComputePass,
    ) -> Result<(), ComputePassError> {
        self.command_encoder_run_compute_pass_impl::<A>(encoder_id, pass.base.as_ref())
    }

    /// Validate and encode a compute pass's command stream into the HAL encoder.
    ///
    /// Phases, in order:
    /// 1. Lock the hub storages (each `read`/`write` is threaded through the
    ///    `Token` chain, which fixes the acquisition order).
    /// 2. Close the encoder's current command buffer and mark the encoder
    ///    `Error` as a fail-safe; the status is restored to `Recording` only if
    ///    the whole pass encodes successfully.
    /// 3. Replay each [`ComputeCommand`], validating and tracking resource use.
    /// 4. Open a fresh command buffer that is swapped to sit *before* the pass
    ///    body, and fill it with init fixups and usage-transition barriers.
    ///
    /// # Errors
    /// Returns a [`ComputePassError`] (tagged with the failing command's
    /// [`PassErrorScope`]) on any validation failure.
    #[doc(hidden)]
    pub fn command_encoder_run_compute_pass_impl<A: HalApi>(
        &self,
        encoder_id: id::CommandEncoderId,
        base: BasePassRef<ComputeCommand>,
    ) -> Result<(), ComputePassError> {
        profiling::scope!("CommandEncoder::run_compute_pass");
        let init_scope = PassErrorScope::Pass(encoder_id);

        let hub = A::hub(self);
        let mut token = Token::root();

        let (device_guard, mut token) = hub.devices.read(&mut token);

        let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
        // Spell out the type, to placate rust-analyzer.
        // https://github.com/rust-lang/rust-analyzer/issues/12247
        let cmd_buf: &mut CommandBuffer<A> =
            CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)
                .map_pass_err(init_scope)?;

        // We automatically keep extending command buffers over time, and because
        // we want to insert a command buffer _before_ what we're about to record,
        // we need to make sure to close the previous one.
        cmd_buf.encoder.close();
        // We will reset this to `Recording` if we succeed, acts as a fail-safe.
        cmd_buf.status = CommandEncoderStatus::Error;
        let raw = cmd_buf.encoder.open();

        let device = &device_guard[cmd_buf.device_id.value];

        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.commands {
            list.push(crate::device::trace::Command::RunComputePass {
                base: BasePass::from_ref(base),
            });
        }

        // Remaining storages, still in Token order. The render-bundle guard is
        // taken (and dropped) purely to keep the lock order consistent.
        let (_, mut token) = hub.render_bundles.read(&mut token);
        let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
        let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
        let (pipeline_guard, mut token) = hub.compute_pipelines.read(&mut token);
        let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
        let (buffer_guard, mut token) = hub.buffers.read(&mut token);
        let (texture_guard, _) = hub.textures.read(&mut token);

        // Per-pass encoding state: binding table, current pipeline, and the
        // usage scope that accumulates buffer/texture uses between dispatches.
        let mut state = State {
            binder: Binder::new(),
            pipeline: None,
            scope: UsageScope::new(&*buffer_guard, &*texture_guard),
            debug_scope_depth: 0,
        };
        // Scratch reused across SetBindGroup commands to avoid reallocating.
        let mut temp_offsets = Vec::new();
        // Cursors into `base.dynamic_offsets` / `base.string_data`.
        let mut dynamic_offset_count = 0;
        let mut string_offset = 0;
        let mut active_query = None;

        cmd_buf.trackers.set_size(
            Some(&*buffer_guard),
            Some(&*texture_guard),
            None,
            None,
            Some(&*bind_group_guard),
            Some(&*pipeline_guard),
            None,
            None,
            Some(&*query_set_guard),
        );

        let hal_desc = hal::ComputePassDescriptor { label: base.label };
        unsafe {
            raw.begin_compute_pass(&hal_desc);
        }

        // Collects usage transitions made between dispatches; merged back into
        // the command buffer's trackers after the pass completes.
        let mut intermediate_trackers = Tracker::<A>::new();

        // Immediate texture inits required because of prior discards. Need to
        // be inserted before texture reads.
        let mut pending_discard_init_fixups = SurfacesInDiscardState::new();

        for command in base.commands {
            match *command {
                ComputeCommand::SetBindGroup {
                    index,
                    num_dynamic_offsets,
                    bind_group_id,
                } => {
                    let scope = PassErrorScope::SetBindGroup(bind_group_id);

                    let max_bind_groups = cmd_buf.limits.max_bind_groups;
                    if index >= max_bind_groups {
                        return Err(ComputePassErrorInner::BindGroupIndexOutOfRange {
                            index,
                            max: max_bind_groups,
                        })
                        .map_pass_err(scope);
                    }

                    // Slice this command's dynamic offsets out of the flat
                    // per-pass offset array.
                    temp_offsets.clear();
                    temp_offsets.extend_from_slice(
                        &base.dynamic_offsets[dynamic_offset_count
                            ..dynamic_offset_count + (num_dynamic_offsets as usize)],
                    );
                    dynamic_offset_count += num_dynamic_offsets as usize;

                    let bind_group: &BindGroup<A> = cmd_buf
                        .trackers
                        .bind_groups
                        .add_single(&*bind_group_guard, bind_group_id)
                        .ok_or(ComputePassErrorInner::InvalidBindGroup(bind_group_id))
                        .map_pass_err(scope)?;
                    bind_group
                        .validate_dynamic_bindings(index, &temp_offsets, &cmd_buf.limits)
                        .map_pass_err(scope)?;

                    // Queue memory-init work for every buffer this group binds.
                    cmd_buf.buffer_memory_init_actions.extend(
                        bind_group.used_buffer_ranges.iter().filter_map(
                            |action| match buffer_guard.get(action.id) {
                                Ok(buffer) => buffer.initialization_status.check_action(action),
                                Err(_) => None,
                            },
                        ),
                    );

                    // Reads of previously-discarded texture surfaces must be
                    // preceded by a clear; collect those fixups for later.
                    for action in bind_group.used_texture_ranges.iter() {
                        pending_discard_init_fixups.extend(
                            cmd_buf
                                .texture_memory_actions
                                .register_init_action(action, &texture_guard),
                        );
                    }

                    let pipeline_layout_id = state.binder.pipeline_layout_id;
                    let entries = state.binder.assign_group(
                        index as usize,
                        id::Valid(bind_group_id),
                        bind_group,
                        &temp_offsets,
                    );
                    // `entries` is non-empty only when a pipeline layout is
                    // already set, so the `unwrap` below cannot fire otherwise.
                    if !entries.is_empty() {
                        let pipeline_layout =
                            &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw;
                        for (i, e) in entries.iter().enumerate() {
                            let raw_bg = &bind_group_guard[e.group_id.as_ref().unwrap().value].raw;
                            unsafe {
                                raw.set_bind_group(
                                    pipeline_layout,
                                    index + i as u32,
                                    raw_bg,
                                    &e.dynamic_offsets,
                                );
                            }
                        }
                    }
                }
                ComputeCommand::SetPipeline(pipeline_id) => {
                    let scope = PassErrorScope::SetPipelineCompute(pipeline_id);

                    state.pipeline = Some(pipeline_id);

                    let pipeline: &pipeline::ComputePipeline<A> = cmd_buf
                        .trackers
                        .compute_pipelines
                        .add_single(&*pipeline_guard, pipeline_id)
                        .ok_or(ComputePassErrorInner::InvalidPipeline(pipeline_id))
                        .map_pass_err(scope)?;

                    unsafe {
                        raw.set_compute_pipeline(&pipeline.raw);
                    }

                    // Rebind resources
                    if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) {
                        let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value];

                        let (start_index, entries) = state.binder.change_pipeline_layout(
                            &*pipeline_layout_guard,
                            pipeline.layout_id.value,
                            &pipeline.late_sized_buffer_groups,
                        );
                        if !entries.is_empty() {
                            for (i, e) in entries.iter().enumerate() {
                                let raw_bg =
                                    &bind_group_guard[e.group_id.as_ref().unwrap().value].raw;
                                unsafe {
                                    raw.set_bind_group(
                                        &pipeline_layout.raw,
                                        start_index as u32 + i as u32,
                                        raw_bg,
                                        &e.dynamic_offsets,
                                    );
                                }
                            }
                        }

                        // Clear push constant ranges
                        let non_overlapping = super::bind::compute_nonoverlapping_ranges(
                            &pipeline_layout.push_constant_ranges,
                        );
                        for range in non_overlapping {
                            let offset = range.range.start;
                            let size_bytes = range.range.end - offset;
                            super::push_constant_clear(
                                offset,
                                size_bytes,
                                |clear_offset, clear_data| unsafe {
                                    raw.set_push_constants(
                                        &pipeline_layout.raw,
                                        wgt::ShaderStages::COMPUTE,
                                        clear_offset,
                                        clear_data,
                                    );
                                },
                            );
                        }
                    }
                }
                ComputeCommand::SetPushConstant {
                    offset,
                    size_bytes,
                    values_offset,
                } => {
                    let scope = PassErrorScope::SetPushConstant;

                    let end_offset_bytes = offset + size_bytes;
                    // `values_offset` indexes u32 words in `push_constant_data`,
                    // while `offset`/`size_bytes` are byte quantities.
                    let values_end_offset =
                        (values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
                    let data_slice =
                        &base.push_constant_data[(values_offset as usize)..values_end_offset];

                    let pipeline_layout_id = state
                        .binder
                        .pipeline_layout_id
                        //TODO: don't error here, lazily update the push constants
                        .ok_or(ComputePassErrorInner::Dispatch(
                            DispatchError::MissingPipeline,
                        ))
                        .map_pass_err(scope)?;
                    let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];

                    pipeline_layout
                        .validate_push_constant_ranges(
                            wgt::ShaderStages::COMPUTE,
                            offset,
                            end_offset_bytes,
                        )
                        .map_pass_err(scope)?;

                    unsafe {
                        raw.set_push_constants(
                            &pipeline_layout.raw,
                            wgt::ShaderStages::COMPUTE,
                            offset,
                            data_slice,
                        );
                    }
                }
                ComputeCommand::Dispatch(groups) => {
                    let scope = PassErrorScope::Dispatch {
                        indirect: false,
                        pipeline: state.pipeline,
                    };

                    state.is_ready().map_pass_err(scope)?;
                    // Flush accumulated resource-usage state into barriers
                    // before the GPU work actually runs.
                    state
                        .flush_states(
                            raw,
                            &mut intermediate_trackers,
                            &*bind_group_guard,
                            &*buffer_guard,
                            &*texture_guard,
                            None,
                        )
                        .map_pass_err(scope)?;

                    let groups_size_limit = cmd_buf.limits.max_compute_workgroups_per_dimension;

                    if groups[0] > groups_size_limit
                        || groups[1] > groups_size_limit
                        || groups[2] > groups_size_limit
                    {
                        return Err(ComputePassErrorInner::Dispatch(
                            DispatchError::InvalidGroupSize {
                                current: groups,
                                limit: groups_size_limit,
                            },
                        ))
                        .map_pass_err(scope);
                    }

                    unsafe {
                        raw.dispatch(groups);
                    }
                }
                ComputeCommand::DispatchIndirect { buffer_id, offset } => {
                    let scope = PassErrorScope::Dispatch {
                        indirect: true,
                        pipeline: state.pipeline,
                    };

                    state.is_ready().map_pass_err(scope)?;

                    device
                        .require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)
                        .map_pass_err(scope)?;

                    let indirect_buffer: &Buffer<A> = state
                        .scope
                        .buffers
                        .merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
                        .map_pass_err(scope)?;
                    check_buffer_usage(indirect_buffer.usage, wgt::BufferUsages::INDIRECT)
                        .map_pass_err(scope)?;

                    // The dispatch arguments must fit within the buffer.
                    let end_offset = offset + mem::size_of::<wgt::DispatchIndirectArgs>() as u64;
                    if end_offset > indirect_buffer.size {
                        return Err(ComputePassErrorInner::IndirectBufferOverrun {
                            offset,
                            end_offset,
                            buffer_size: indirect_buffer.size,
                        })
                        .map_pass_err(scope);
                    }

                    let buf_raw = indirect_buffer
                        .raw
                        .as_ref()
                        .ok_or(ComputePassErrorInner::InvalidIndirectBuffer(buffer_id))
                        .map_pass_err(scope)?;

                    let stride = 3 * 4; // 3 integers, x/y/z group size

                    // The argument bytes the GPU will read must be initialized.
                    cmd_buf.buffer_memory_init_actions.extend(
                        indirect_buffer.initialization_status.create_action(
                            buffer_id,
                            offset..(offset + stride),
                            MemoryInitKind::NeedsInitializedMemory,
                        ),
                    );

                    state
                        .flush_states(
                            raw,
                            &mut intermediate_trackers,
                            &*bind_group_guard,
                            &*buffer_guard,
                            &*texture_guard,
                            Some(id::Valid(buffer_id)),
                        )
                        .map_pass_err(scope)?;
                    unsafe {
                        raw.dispatch_indirect(buf_raw, offset);
                    }
                }
                ComputeCommand::PushDebugGroup { color: _, len } => {
                    state.debug_scope_depth += 1;
                    // Labels live concatenated in `string_data`; `len` bytes
                    // starting at the running cursor belong to this command.
                    let label =
                        str::from_utf8(&base.string_data[string_offset..string_offset + len])
                            .unwrap();
                    string_offset += len;
                    unsafe {
                        raw.begin_debug_marker(label);
                    }
                }
                ComputeCommand::PopDebugGroup => {
                    let scope = PassErrorScope::PopDebugGroup;

                    if state.debug_scope_depth == 0 {
                        return Err(ComputePassErrorInner::InvalidPopDebugGroup)
                            .map_pass_err(scope);
                    }
                    state.debug_scope_depth -= 1;
                    unsafe {
                        raw.end_debug_marker();
                    }
                }
                ComputeCommand::InsertDebugMarker { color: _, len } => {
                    let label =
                        str::from_utf8(&base.string_data[string_offset..string_offset + len])
                            .unwrap();
                    string_offset += len;
                    unsafe { raw.insert_debug_marker(label) }
                }
                ComputeCommand::WriteTimestamp {
                    query_set_id,
                    query_index,
                } => {
                    let scope = PassErrorScope::WriteTimestamp;

                    device
                        .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES)
                        .map_pass_err(scope)?;

                    let query_set: &resource::QuerySet<A> = cmd_buf
                        .trackers
                        .query_sets
                        .add_single(&*query_set_guard, query_set_id)
                        .ok_or(ComputePassErrorInner::InvalidQuerySet(query_set_id))
                        .map_pass_err(scope)?;

                    query_set
                        .validate_and_write_timestamp(raw, query_set_id, query_index, None)
                        .map_pass_err(scope)?;
                }
                ComputeCommand::BeginPipelineStatisticsQuery {
                    query_set_id,
                    query_index,
                } => {
                    let scope = PassErrorScope::BeginPipelineStatisticsQuery;

                    let query_set: &resource::QuerySet<A> = cmd_buf
                        .trackers
                        .query_sets
                        .add_single(&*query_set_guard, query_set_id)
                        .ok_or(ComputePassErrorInner::InvalidQuerySet(query_set_id))
                        .map_pass_err(scope)?;

                    query_set
                        .validate_and_begin_pipeline_statistics_query(
                            raw,
                            query_set_id,
                            query_index,
                            None,
                            &mut active_query,
                        )
                        .map_pass_err(scope)?;
                }
                ComputeCommand::EndPipelineStatisticsQuery => {
                    let scope = PassErrorScope::EndPipelineStatisticsQuery;

                    end_pipeline_statistics_query(raw, &*query_set_guard, &mut active_query)
                        .map_pass_err(scope)?;
                }
            }
        }

        unsafe {
            raw.end_compute_pass();
        }
        // We've successfully recorded the compute pass, bring the
        // command buffer out of the error state.
        cmd_buf.status = CommandEncoderStatus::Recording;

        // Stop the current command buffer.
        cmd_buf.encoder.close();

        // Create a new command buffer, which we will insert _before_ the body of the compute pass.
        //
        // Use that buffer to insert barriers and clear discarded images.
        let transit = cmd_buf.encoder.open();
        fixup_discarded_surfaces(
            pending_discard_init_fixups.into_iter(),
            transit,
            &texture_guard,
            &mut cmd_buf.trackers.textures,
            device,
        );
        CommandBuffer::insert_barriers_from_tracker(
            transit,
            &mut cmd_buf.trackers,
            &intermediate_trackers,
            &*buffer_guard,
            &*texture_guard,
        );
        // Close the command buffer, and swap it with the previous.
        cmd_buf.encoder.close_and_swap();

        Ok(())
    }
}
|
||||
|
||||
pub mod compute_ffi {
|
||||
use super::{ComputeCommand, ComputePass};
|
||||
use crate::{id, RawString};
|
||||
use std::{convert::TryInto, ffi, slice};
|
||||
use wgt::{BufferAddress, DynamicOffset};
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function is unsafe as there is no guarantee that the given pointer is
|
||||
/// valid for `offset_length` elements.
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn wgpu_compute_pass_set_bind_group(
|
||||
pass: &mut ComputePass,
|
||||
index: u32,
|
||||
bind_group_id: id::BindGroupId,
|
||||
offsets: *const DynamicOffset,
|
||||
offset_length: usize,
|
||||
) {
|
||||
let redundant = unsafe {
|
||||
pass.current_bind_groups.set_and_check_redundant(
|
||||
bind_group_id,
|
||||
index,
|
||||
&mut pass.base.dynamic_offsets,
|
||||
offsets,
|
||||
offset_length,
|
||||
)
|
||||
};
|
||||
|
||||
if redundant {
|
||||
return;
|
||||
}
|
||||
|
||||
pass.base.commands.push(ComputeCommand::SetBindGroup {
|
||||
index,
|
||||
num_dynamic_offsets: offset_length.try_into().unwrap(),
|
||||
bind_group_id,
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_compute_pass_set_pipeline(
|
||||
pass: &mut ComputePass,
|
||||
pipeline_id: id::ComputePipelineId,
|
||||
) {
|
||||
if pass.current_pipeline.set_and_check_redundant(pipeline_id) {
|
||||
return;
|
||||
}
|
||||
|
||||
pass.base
|
||||
.commands
|
||||
.push(ComputeCommand::SetPipeline(pipeline_id));
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function is unsafe as there is no guarantee that the given pointer is
|
||||
/// valid for `size_bytes` bytes.
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn wgpu_compute_pass_set_push_constant(
|
||||
pass: &mut ComputePass,
|
||||
offset: u32,
|
||||
size_bytes: u32,
|
||||
data: *const u8,
|
||||
) {
|
||||
assert_eq!(
|
||||
offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
|
||||
0,
|
||||
"Push constant offset must be aligned to 4 bytes."
|
||||
);
|
||||
assert_eq!(
|
||||
size_bytes & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
|
||||
0,
|
||||
"Push constant size must be aligned to 4 bytes."
|
||||
);
|
||||
let data_slice = unsafe { slice::from_raw_parts(data, size_bytes as usize) };
|
||||
let value_offset = pass.base.push_constant_data.len().try_into().expect(
|
||||
"Ran out of push constant space. Don't set 4gb of push constants per ComputePass.",
|
||||
);
|
||||
|
||||
pass.base.push_constant_data.extend(
|
||||
data_slice
|
||||
.chunks_exact(wgt::PUSH_CONSTANT_ALIGNMENT as usize)
|
||||
.map(|arr| u32::from_ne_bytes([arr[0], arr[1], arr[2], arr[3]])),
|
||||
);
|
||||
|
||||
pass.base.commands.push(ComputeCommand::SetPushConstant {
|
||||
offset,
|
||||
size_bytes,
|
||||
values_offset: value_offset,
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_compute_pass_dispatch_workgroups(
|
||||
pass: &mut ComputePass,
|
||||
groups_x: u32,
|
||||
groups_y: u32,
|
||||
groups_z: u32,
|
||||
) {
|
||||
pass.base
|
||||
.commands
|
||||
.push(ComputeCommand::Dispatch([groups_x, groups_y, groups_z]));
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_compute_pass_dispatch_workgroups_indirect(
|
||||
pass: &mut ComputePass,
|
||||
buffer_id: id::BufferId,
|
||||
offset: BufferAddress,
|
||||
) {
|
||||
pass.base
|
||||
.commands
|
||||
.push(ComputeCommand::DispatchIndirect { buffer_id, offset });
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function is unsafe as there is no guarantee that the given `label`
|
||||
/// is a valid null-terminated string.
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn wgpu_compute_pass_push_debug_group(
|
||||
pass: &mut ComputePass,
|
||||
label: RawString,
|
||||
color: u32,
|
||||
) {
|
||||
let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes();
|
||||
pass.base.string_data.extend_from_slice(bytes);
|
||||
|
||||
pass.base.commands.push(ComputeCommand::PushDebugGroup {
|
||||
color,
|
||||
len: bytes.len(),
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_compute_pass_pop_debug_group(pass: &mut ComputePass) {
|
||||
pass.base.commands.push(ComputeCommand::PopDebugGroup);
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function is unsafe as there is no guarantee that the given `label`
|
||||
/// is a valid null-terminated string.
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn wgpu_compute_pass_insert_debug_marker(
|
||||
pass: &mut ComputePass,
|
||||
label: RawString,
|
||||
color: u32,
|
||||
) {
|
||||
let bytes = unsafe { ffi::CStr::from_ptr(label) }.to_bytes();
|
||||
pass.base.string_data.extend_from_slice(bytes);
|
||||
|
||||
pass.base.commands.push(ComputeCommand::InsertDebugMarker {
|
||||
color,
|
||||
len: bytes.len(),
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_compute_pass_write_timestamp(
|
||||
pass: &mut ComputePass,
|
||||
query_set_id: id::QuerySetId,
|
||||
query_index: u32,
|
||||
) {
|
||||
pass.base.commands.push(ComputeCommand::WriteTimestamp {
|
||||
query_set_id,
|
||||
query_index,
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_compute_pass_begin_pipeline_statistics_query(
|
||||
pass: &mut ComputePass,
|
||||
query_set_id: id::QuerySetId,
|
||||
query_index: u32,
|
||||
) {
|
||||
pass.base
|
||||
.commands
|
||||
.push(ComputeCommand::BeginPipelineStatisticsQuery {
|
||||
query_set_id,
|
||||
query_index,
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn wgpu_compute_pass_end_pipeline_statistics_query(pass: &mut ComputePass) {
|
||||
pass.base
|
||||
.commands
|
||||
.push(ComputeCommand::EndPipelineStatisticsQuery);
|
||||
}
|
||||
}
|
||||
252
third-party/vendor/wgpu-core/src/command/draw.rs
vendored
Normal file
252
third-party/vendor/wgpu-core/src/command/draw.rs
vendored
Normal file
|
|
@ -0,0 +1,252 @@
|
|||
/*! Draw structures - shared between render passes and bundles.
|
||||
!*/
|
||||
|
||||
use crate::{
|
||||
binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError},
|
||||
error::ErrorFormatter,
|
||||
id,
|
||||
track::UsageConflict,
|
||||
validation::{MissingBufferUsageError, MissingTextureUsageError},
|
||||
};
|
||||
use wgt::{BufferAddress, BufferSize, Color};
|
||||
|
||||
use std::num::NonZeroU32;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Error validating a draw call.
///
/// Produced by the per-draw validation shared between render passes and
/// render bundles; the `#[error]` strings are the user-facing messages.
#[derive(Clone, Debug, Error, Eq, PartialEq)]
#[non_exhaustive]
pub enum DrawError {
    #[error("Blend constant needs to be set")]
    MissingBlendConstant,
    #[error("Render pipeline must be set")]
    MissingPipeline,
    #[error("Vertex buffer {index} must be set")]
    MissingVertexBuffer { index: u32 },
    #[error("Index buffer must be set")]
    MissingIndexBuffer,
    #[error("The pipeline layout, associated with the current render pipeline, contains a bind group layout at index {index} which is incompatible with the bind group layout associated with the bind group at {index}")]
    IncompatibleBindGroup {
        index: u32,
        //expected: BindGroupLayoutId,
        //provided: Option<(BindGroupLayoutId, BindGroupId)>,
    },
    /// A non-indexed draw would read vertices past the end of a vertex buffer.
    #[error("Vertex {last_vertex} extends beyond limit {vertex_limit} imposed by the buffer in slot {slot}. Did you bind the correct `Vertex` step-rate vertex buffer?")]
    VertexBeyondLimit {
        last_vertex: u32,
        vertex_limit: u32,
        slot: u32,
    },
    /// A draw's instance range exceeds what an instance-rate buffer provides.
    #[error("Instance {last_instance} extends beyond limit {instance_limit} imposed by the buffer in slot {slot}. Did you bind the correct `Instance` step-rate vertex buffer?")]
    InstanceBeyondLimit {
        last_instance: u32,
        instance_limit: u32,
        slot: u32,
    },
    /// An indexed draw would read indices past the end of the index buffer.
    #[error("Index {last_index} extends beyond limit {index_limit}. Did you bind the correct index buffer?")]
    IndexBeyondLimit { last_index: u32, index_limit: u32 },
    #[error(
        "Pipeline index format ({pipeline:?}) and buffer index format ({buffer:?}) do not match"
    )]
    UnmatchedIndexFormats {
        pipeline: wgt::IndexFormat,
        buffer: wgt::IndexFormat,
    },
    #[error(transparent)]
    BindingSizeTooSmall(#[from] LateMinBufferBindingSizeMismatch),
}
|
||||
|
||||
/// Error encountered when encoding a render command.
/// This is the shared error set between render bundles and passes.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum RenderCommandError {
    #[error("Bind group {0:?} is invalid")]
    InvalidBindGroup(id::BindGroupId),
    #[error("Render bundle {0:?} is invalid")]
    InvalidRenderBundle(id::RenderBundleId),
    #[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
    BindGroupIndexOutOfRange { index: u32, max: u32 },
    #[error("Dynamic buffer offset {0} does not respect device's requested `{1}` limit {2}")]
    UnalignedBufferOffset(u64, &'static str, u32),
    #[error("Number of buffer offsets ({actual}) does not match the number of dynamic bindings ({expected})")]
    InvalidDynamicOffsetCount { actual: usize, expected: usize },
    #[error("Render pipeline {0:?} is invalid")]
    InvalidPipeline(id::RenderPipelineId),
    #[error("QuerySet {0:?} is invalid")]
    InvalidQuerySet(id::QuerySetId),
    #[error("Render pipeline targets are incompatible with render pass")]
    IncompatiblePipelineTargets(#[from] crate::device::RenderPassCompatibilityError),
    /// "Rods" = read-only depth/stencil.
    #[error("Pipeline writes to depth/stencil, while the pass has read-only depth/stencil")]
    IncompatiblePipelineRods,
    #[error(transparent)]
    UsageConflict(#[from] UsageConflict),
    #[error("Buffer {0:?} is destroyed")]
    DestroyedBuffer(id::BufferId),
    #[error(transparent)]
    MissingBufferUsage(#[from] MissingBufferUsageError),
    #[error(transparent)]
    MissingTextureUsage(#[from] MissingTextureUsageError),
    #[error(transparent)]
    PushConstants(#[from] PushConstantUploadError),
    #[error("Viewport width {0} and/or height {1} are less than or equal to 0")]
    InvalidViewportDimension(f32, f32),
    #[error("Viewport minDepth {0} and/or maxDepth {1} are not in [0, 1]")]
    InvalidViewportDepth(f32, f32),
    #[error("Scissor {0:?} is not contained in the render target {1:?}")]
    InvalidScissorRect(Rect<u32>, wgt::Extent3d),
    #[error("Support for {0} is not implemented yet")]
    Unimplemented(&'static str),
}
|
||||
impl crate::error::PrettyError for RenderCommandError {
|
||||
fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
|
||||
fmt.error(self);
|
||||
match *self {
|
||||
Self::InvalidBindGroup(id) => {
|
||||
fmt.bind_group_label(&id);
|
||||
}
|
||||
Self::InvalidPipeline(id) => {
|
||||
fmt.render_pipeline_label(&id);
|
||||
}
|
||||
Self::UsageConflict(UsageConflict::TextureInvalid { id }) => {
|
||||
fmt.texture_label(&id);
|
||||
}
|
||||
Self::UsageConflict(UsageConflict::BufferInvalid { id })
|
||||
| Self::DestroyedBuffer(id) => {
|
||||
fmt.buffer_label(&id);
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// An axis-aligned rectangle, used for viewports and scissors.
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "trace"),
    derive(serde::Serialize)
)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "replay"),
    derive(serde::Deserialize)
)]
pub struct Rect<T> {
    /// Origin, x coordinate.
    pub x: T,
    /// Origin, y coordinate.
    pub y: T,
    /// Width.
    pub w: T,
    /// Height.
    pub h: T,
}
|
||||
|
||||
/// One recorded render command, replayed when the pass/bundle is encoded.
///
/// Variable-length payloads (dynamic offsets, push-constant words, debug
/// strings) live in the owning `BasePass`'s side arrays; commands store only
/// counts/offsets into them.
#[doc(hidden)]
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "trace"),
    derive(serde::Serialize)
)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "replay"),
    derive(serde::Deserialize)
)]
pub enum RenderCommand {
    SetBindGroup {
        index: u32,
        /// Count of offsets consumed from `BasePass::dynamic_offsets`.
        num_dynamic_offsets: u8,
        bind_group_id: id::BindGroupId,
    },
    SetPipeline(id::RenderPipelineId),
    SetIndexBuffer {
        buffer_id: id::BufferId,
        index_format: wgt::IndexFormat,
        offset: BufferAddress,
        /// `None` means "to the end of the buffer".
        size: Option<BufferSize>,
    },
    SetVertexBuffer {
        slot: u32,
        buffer_id: id::BufferId,
        offset: BufferAddress,
        /// `None` means "to the end of the buffer".
        size: Option<BufferSize>,
    },
    SetBlendConstant(Color),
    SetStencilReference(u32),
    SetViewport {
        rect: Rect<f32>,
        //TODO: use half-float to reduce the size?
        depth_min: f32,
        depth_max: f32,
    },
    SetScissor(Rect<u32>),

    /// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
    ///
    /// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
    /// of the restrictions these commands must satisfy.
    SetPushConstant {
        /// Which stages we are setting push constant values for.
        stages: wgt::ShaderStages,

        /// The byte offset within the push constant storage to write to. This
        /// must be a multiple of four.
        offset: u32,

        /// The number of bytes to write. This must be a multiple of four.
        size_bytes: u32,

        /// Index in [`BasePass::push_constant_data`] of the start of the data
        /// to be written.
        ///
        /// Note: this is not a byte offset like `offset`. Rather, it is the
        /// index of the first `u32` element in `push_constant_data` to read.
        ///
        /// `None` means zeros should be written to the destination range, and
        /// there is no corresponding data in `push_constant_data`. This is used
        /// by render bundles, which explicitly clear out any state that
        /// post-bundle code might see.
        values_offset: Option<u32>,
    },
    Draw {
        vertex_count: u32,
        instance_count: u32,
        first_vertex: u32,
        first_instance: u32,
    },
    DrawIndexed {
        index_count: u32,
        instance_count: u32,
        first_index: u32,
        base_vertex: i32,
        first_instance: u32,
    },
    MultiDrawIndirect {
        buffer_id: id::BufferId,
        offset: BufferAddress,
        /// Count of `None` represents a non-multi call.
        count: Option<NonZeroU32>,
        indexed: bool,
    },
    MultiDrawIndirectCount {
        buffer_id: id::BufferId,
        offset: BufferAddress,
        /// Buffer holding the actual draw count to read at encode time.
        count_buffer_id: id::BufferId,
        count_buffer_offset: BufferAddress,
        max_count: u32,
        indexed: bool,
    },
    /// `len` bytes of the label are read from `BasePass::string_data`.
    PushDebugGroup {
        color: u32,
        len: usize,
    },
    PopDebugGroup,
    /// `len` bytes of the label are read from `BasePass::string_data`.
    InsertDebugMarker {
        color: u32,
        len: usize,
    },
    WriteTimestamp {
        query_set_id: id::QuerySetId,
        query_index: u32,
    },
    BeginPipelineStatisticsQuery {
        query_set_id: id::QuerySetId,
        query_index: u32,
    },
    EndPipelineStatisticsQuery,
    ExecuteBundle(id::RenderBundleId),
}
|
||||
338
third-party/vendor/wgpu-core/src/command/memory_init.rs
vendored
Normal file
338
third-party/vendor/wgpu-core/src/command/memory_init.rs
vendored
Normal file
|
|
@ -0,0 +1,338 @@
|
|||
use std::{collections::hash_map::Entry, ops::Range, vec::Drain};
|
||||
|
||||
use hal::CommandEncoder;
|
||||
|
||||
use crate::{
|
||||
device::Device,
|
||||
hal_api::HalApi,
|
||||
id::{self, TextureId},
|
||||
init_tracker::*,
|
||||
resource::{Buffer, Texture},
|
||||
storage::Storage,
|
||||
track::{TextureTracker, Tracker},
|
||||
FastHashMap,
|
||||
};
|
||||
|
||||
use super::{clear::clear_texture, BakedCommands, DestroyedBufferError, DestroyedTextureError};
|
||||
|
||||
/// Surface that was discarded by `StoreOp::Discard` of a preceding renderpass.
/// Any read access to this surface needs to be preceded by a texture initialization.
#[derive(Clone)]
pub(crate) struct TextureSurfaceDiscard {
    pub texture: TextureId,
    /// Mip level of the discarded surface.
    pub mip_level: u32,
    /// Array layer of the discarded surface.
    pub layer: u32,
}

/// Discarded surfaces that still need an immediate clear before being read.
pub(crate) type SurfacesInDiscardState = Vec<TextureSurfaceDiscard>;
|
||||
|
||||
/// Texture memory-init bookkeeping accumulated while recording one command buffer.
#[derive(Default)]
pub(crate) struct CommandBufferTextureMemoryActions {
    /// The tracker actions that we need to be executed before the command
    /// buffer is executed.
    init_actions: Vec<TextureInitTrackerAction>,
    /// All the discards that haven't been followed by init again within the
    /// command buffer i.e. everything in this list resets the texture init
    /// state *after* the command buffer execution
    discards: Vec<TextureSurfaceDiscard>,
}
|
||||
|
||||
impl CommandBufferTextureMemoryActions {
    /// Drain (and empty) the accumulated pre-execution init actions.
    pub(crate) fn drain_init_actions(&mut self) -> Drain<TextureInitTrackerAction> {
        self.init_actions.drain(..)
    }

    /// Record that a surface was discarded and is now uninitialized.
    pub(crate) fn discard(&mut self, discard: TextureSurfaceDiscard) {
        self.discards.push(discard);
    }

    // Registers a TextureInitTrackerAction.
    // Returns previously discarded surface that need to be initialized *immediately* now.
    // Only returns a non-empty list if action is MemoryInitKind::NeedsInitializedMemory.
    #[must_use]
    pub(crate) fn register_init_action<A: hal::Api>(
        &mut self,
        action: &TextureInitTrackerAction,
        texture_guard: &Storage<Texture<A>, TextureId>,
    ) -> SurfacesInDiscardState {
        let mut immediately_necessary_clears = SurfacesInDiscardState::new();

        // Note that within a command buffer we may stack arbitrary memory init
        // actions on the same texture Since we react to them in sequence, they
        // are going to be dropped again at queue submit
        //
        // We don't need to add MemoryInitKind::NeedsInitializedMemory to
        // init_actions if a surface is part of the discard list. But that would
        // mean splitting up the action which is more than we'd win here.
        self.init_actions
            .extend(match texture_guard.get(action.id) {
                Ok(texture) => texture.initialization_status.check_action(action),
                Err(_) => return immediately_necessary_clears, // texture no longer exists
            });

        // We expect very few discarded surfaces at any point in time which is
        // why a simple linear search is likely best. (i.e. most of the time
        // self.discards is empty!)
        let init_actions = &mut self.init_actions;
        // `retain` doubles as the search: any discard covered by this action's
        // mip/layer ranges is removed from the list, and — if the action needs
        // initialized memory — queued for an immediate clear.
        self.discards.retain(|discarded_surface| {
            if discarded_surface.texture == action.id
                && action.range.layer_range.contains(&discarded_surface.layer)
                && action
                    .range
                    .mip_range
                    .contains(&discarded_surface.mip_level)
            {
                if let MemoryInitKind::NeedsInitializedMemory = action.kind {
                    immediately_necessary_clears.push(discarded_surface.clone());

                    // Mark surface as implicitly initialized (this is relevant
                    // because it might have been uninitialized prior to
                    // discarding
                    init_actions.push(TextureInitTrackerAction {
                        id: discarded_surface.texture,
                        range: TextureInitRange {
                            mip_range: discarded_surface.mip_level
                                ..(discarded_surface.mip_level + 1),
                            layer_range: discarded_surface.layer..(discarded_surface.layer + 1),
                        },
                        kind: MemoryInitKind::ImplicitlyInitialized,
                    });
                }
                false
            } else {
                true
            }
        });

        immediately_necessary_clears
    }

    // Shortcut for register_init_action when it is known that the action is an
    // implicit init, not requiring any immediate resource init.
    pub(crate) fn register_implicit_init<A: hal::Api>(
        &mut self,
        id: id::Valid<TextureId>,
        range: TextureInitRange,
        texture_guard: &Storage<Texture<A>, TextureId>,
    ) {
        let must_be_empty = self.register_init_action(
            &TextureInitTrackerAction {
                id: id.0,
                range,
                kind: MemoryInitKind::ImplicitlyInitialized,
            },
            texture_guard,
        );
        // Implicit inits never require immediate clears by construction.
        assert!(must_be_empty.is_empty());
    }
}
|
||||
|
||||
// Utility function that takes discarded surfaces from (several calls to)
// register_init_action and initializes them on the spot.
//
// Takes care of barriers as well!
pub(crate) fn fixup_discarded_surfaces<
    A: HalApi,
    InitIter: Iterator<Item = TextureSurfaceDiscard>,
>(
    inits: InitIter,
    encoder: &mut A::CommandEncoder,
    texture_guard: &Storage<Texture<A>, TextureId>,
    texture_tracker: &mut TextureTracker<A>,
    device: &Device<A>,
) {
    for init in inits {
        // Each discard entry names exactly one (mip level, array layer) pair,
        // so each clear covers a single-surface range.
        clear_texture(
            texture_guard,
            id::Valid(init.texture),
            TextureInitRange {
                mip_range: init.mip_level..(init.mip_level + 1),
                layer_range: init.layer..(init.layer + 1),
            },
            encoder,
            texture_tracker,
            &device.alignments,
            &device.zero_buffer,
        )
        .unwrap(); // NOTE(review): clear errors panic here — presumably unreachable for tracked surfaces; confirm
    }
}
|
||||
|
||||
impl<A: HalApi> BakedCommands<A> {
    // inserts all buffer initializations that are going to be needed for
    // executing the commands and updates resource init states accordingly
    pub(crate) fn initialize_buffer_memory(
        &mut self,
        device_tracker: &mut Tracker<A>,
        buffer_guard: &mut Storage<Buffer<A>, id::BufferId>,
    ) -> Result<(), DestroyedBufferError> {
        // Gather init ranges for each buffer so we can collapse them.
        // It is not possible to do this at an earlier point since previously
        // executed command buffer change the resource init state.
        let mut uninitialized_ranges_per_buffer = FastHashMap::default();
        for buffer_use in self.buffer_memory_init_actions.drain(..) {
            let buffer = buffer_guard
                .get_mut(buffer_use.id)
                .map_err(|_| DestroyedBufferError(buffer_use.id))?;

            // align the end to 4
            let end_remainder = buffer_use.range.end % wgt::COPY_BUFFER_ALIGNMENT;
            let end = if end_remainder == 0 {
                buffer_use.range.end
            } else {
                buffer_use.range.end + wgt::COPY_BUFFER_ALIGNMENT - end_remainder
            };
            // Mark the range initialized on the tracker; it yields the
            // sub-ranges that were still uninitialized before this call.
            let uninitialized_ranges = buffer
                .initialization_status
                .drain(buffer_use.range.start..end);

            match buffer_use.kind {
                MemoryInitKind::ImplicitlyInitialized => {}
                MemoryInitKind::NeedsInitializedMemory => {
                    match uninitialized_ranges_per_buffer.entry(buffer_use.id) {
                        Entry::Vacant(e) => {
                            e.insert(
                                uninitialized_ranges.collect::<Vec<Range<wgt::BufferAddress>>>(),
                            );
                        }
                        Entry::Occupied(mut e) => {
                            e.get_mut().extend(uninitialized_ranges);
                        }
                    }
                }
            }
        }

        for (buffer_id, mut ranges) in uninitialized_ranges_per_buffer {
            // Collapse touching ranges.
            ranges.sort_by_key(|r| r.start);
            for i in (1..ranges.len()).rev() {
                // The memory init tracker made sure of this!
                assert!(ranges[i - 1].end <= ranges[i].start);
                if ranges[i].start == ranges[i - 1].end {
                    ranges[i - 1].end = ranges[i].end;
                    ranges.swap_remove(i); // Ordering not important at this point
                }
            }

            // Don't do use_replace since the buffer may already no longer have
            // a ref_count.
            //
            // However, we *know* that it is currently in use, so the tracker
            // must already know about it.
            let transition = device_tracker
                .buffers
                .set_single(buffer_guard, buffer_id, hal::BufferUses::COPY_DST)
                .unwrap()
                .1;

            let buffer = buffer_guard
                .get_mut(buffer_id)
                .map_err(|_| DestroyedBufferError(buffer_id))?;
            let raw_buf = buffer.raw.as_ref().ok_or(DestroyedBufferError(buffer_id))?;

            unsafe {
                self.encoder.transition_buffers(
                    transition
                        .map(|pending| pending.into_hal(buffer))
                        .into_iter(),
                );
            }

            for range in ranges.iter() {
                // Both bounds must be 4-aligned: the tracker only ever hands
                // out ranges it was fed, and the loop above aligned the ends.
                assert!(
                    range.start % wgt::COPY_BUFFER_ALIGNMENT == 0,
                    "Buffer {:?} has an uninitialized range with a start \
                     not aligned to 4 (start was {})",
                    raw_buf,
                    range.start
                );
                assert!(
                    range.end % wgt::COPY_BUFFER_ALIGNMENT == 0,
                    "Buffer {:?} has an uninitialized range with an end \
                     not aligned to 4 (end was {})",
                    raw_buf,
                    range.end
                );

                unsafe {
                    self.encoder.clear_buffer(raw_buf, range.clone());
                }
            }
        }
        Ok(())
    }

    // inserts all texture initializations that are going to be needed for
    // executing the commands and updates resource init states accordingly any
    // textures that are left discarded by this command buffer will be marked as
    // uninitialized
    pub(crate) fn initialize_texture_memory(
        &mut self,
        device_tracker: &mut Tracker<A>,
        texture_guard: &mut Storage<Texture<A>, TextureId>,
        device: &Device<A>,
    ) -> Result<(), DestroyedTextureError> {
        let mut ranges: Vec<TextureInitRange> = Vec::new();
        for texture_use in self.texture_memory_actions.drain_init_actions() {
            let texture = texture_guard
                .get_mut(texture_use.id)
                .map_err(|_| DestroyedTextureError(texture_use.id))?;

            let use_range = texture_use.range;
            let affected_mip_trackers = texture
                .initialization_status
                .mips
                .iter_mut()
                .enumerate()
                .skip(use_range.mip_range.start as usize)
                .take((use_range.mip_range.end - use_range.mip_range.start) as usize);

            match texture_use.kind {
                MemoryInitKind::ImplicitlyInitialized => {
                    // The command itself writes the memory: just mark the
                    // layers initialized, no clear needed.
                    for (_, mip_tracker) in affected_mip_trackers {
                        mip_tracker.drain(use_range.layer_range.clone());
                    }
                }
                MemoryInitKind::NeedsInitializedMemory => {
                    // Collect every still-uninitialized (mip, layer) span so
                    // it can be zero-cleared below.
                    for (mip_level, mip_tracker) in affected_mip_trackers {
                        for layer_range in mip_tracker.drain(use_range.layer_range.clone()) {
                            ranges.push(TextureInitRange {
                                mip_range: (mip_level as u32)..(mip_level as u32 + 1),
                                layer_range,
                            });
                        }
                    }
                }
            }

            // TODO: Could we attempt some range collapsing here?
            for range in ranges.drain(..) {
                clear_texture(
                    texture_guard,
                    id::Valid(texture_use.id),
                    range,
                    &mut self.encoder,
                    &mut device_tracker.textures,
                    &device.alignments,
                    &device.zero_buffer,
                )
                .unwrap();
            }
        }

        // Now that all buffers/textures have the proper init state for before
        // cmdbuf start, we discard init states for textures it left discarded
        // after its execution.
        for surface_discard in self.texture_memory_actions.discards.iter() {
            let texture = texture_guard
                .get_mut(surface_discard.texture)
                .map_err(|_| DestroyedTextureError(surface_discard.texture))?;
            texture
                .initialization_status
                .discard(surface_discard.mip_level, surface_discard.layer);
        }

        Ok(())
    }
}
|
||||
645
third-party/vendor/wgpu-core/src/command/mod.rs
vendored
Normal file
645
third-party/vendor/wgpu-core/src/command/mod.rs
vendored
Normal file
|
|
@ -0,0 +1,645 @@
|
|||
mod bind;
|
||||
mod bundle;
|
||||
mod clear;
|
||||
mod compute;
|
||||
mod draw;
|
||||
mod memory_init;
|
||||
mod query;
|
||||
mod render;
|
||||
mod transfer;
|
||||
|
||||
use std::slice;
|
||||
|
||||
pub(crate) use self::clear::clear_texture;
|
||||
pub use self::{
|
||||
bundle::*, clear::ClearError, compute::*, draw::*, query::*, render::*, transfer::*,
|
||||
};
|
||||
|
||||
use self::memory_init::CommandBufferTextureMemoryActions;
|
||||
|
||||
use crate::error::{ErrorFormatter, PrettyError};
|
||||
use crate::init_tracker::BufferInitTrackerAction;
|
||||
use crate::track::{Tracker, UsageScope};
|
||||
use crate::{
|
||||
global::Global,
|
||||
hal_api::HalApi,
|
||||
hub::Token,
|
||||
id,
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
resource::{Buffer, Texture},
|
||||
storage::Storage,
|
||||
Label, Stored,
|
||||
};
|
||||
|
||||
use hal::CommandEncoder as _;
|
||||
use thiserror::Error;
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
use crate::device::trace::Command as TraceCommand;
|
||||
|
||||
const PUSH_CONSTANT_CLEAR_ARRAY: &[u32] = &[0_u32; 64];
|
||||
|
||||
/// Lifecycle state of a command encoder, used to validate API calls against it.
#[derive(Debug)]
enum CommandEncoderStatus {
    /// Still accepting commands.
    Recording,
    /// `finish` succeeded; no further recording allowed.
    Finished,
    /// A previous operation failed; the encoder is poisoned.
    Error,
}
|
||||
|
||||
/// Wrapper over a raw hal command encoder plus the command buffers it has
/// produced so far.
struct CommandEncoder<A: hal::Api> {
    /// The underlying hal encoder.
    raw: A::CommandEncoder,
    /// Command buffers closed out of `raw`, in submission order.
    list: Vec<A::CommandBuffer>,
    /// Whether `raw` currently has an open recording session.
    is_open: bool,
    /// Debug label forwarded to `begin_encoding`.
    label: Option<String>,
}
|
||||
|
||||
//TODO: handle errors better
impl<A: hal::Api> CommandEncoder<A> {
    /// Closes the live encoder
    ///
    /// The finished command buffer is inserted *before* the most recently
    /// closed one, swapping their submission order.
    fn close_and_swap(&mut self) {
        if self.is_open {
            self.is_open = false;
            let new = unsafe { self.raw.end_encoding().unwrap() };
            self.list.insert(self.list.len() - 1, new);
        }
    }

    /// Closes the live encoder, appending its command buffer to the list.
    /// No-op if nothing is open.
    fn close(&mut self) {
        if self.is_open {
            self.is_open = false;
            let cmd_buf = unsafe { self.raw.end_encoding().unwrap() };
            self.list.push(cmd_buf);
        }
    }

    /// Abandons any open recording session without producing a command buffer.
    fn discard(&mut self) {
        if self.is_open {
            self.is_open = false;
            unsafe { self.raw.discard_encoding() };
        }
    }

    /// Returns the raw encoder, beginning a new recording session first if
    /// none is open.
    fn open(&mut self) -> &mut A::CommandEncoder {
        if !self.is_open {
            self.is_open = true;
            let label = self.label.as_deref();
            unsafe { self.raw.begin_encoding(label).unwrap() };
        }
        &mut self.raw
    }

    /// Unconditionally begins a new recording session with the given label.
    /// Caller must ensure no session is already open.
    fn open_pass(&mut self, label: Option<&str>) {
        self.is_open = true;
        unsafe { self.raw.begin_encoding(label).unwrap() };
    }
}
|
||||
|
||||
/// The finished, submission-ready form of a [`CommandBuffer`]: its raw
/// encoder, recorded command buffers, trackers, and the deferred memory-init
/// work still to be resolved at submit time.
pub struct BakedCommands<A: HalApi> {
    pub(crate) encoder: A::CommandEncoder,
    pub(crate) list: Vec<A::CommandBuffer>,
    pub(crate) trackers: Tracker<A>,
    // Buffer init actions replayed by `initialize_buffer_memory`.
    buffer_memory_init_actions: Vec<BufferInitTrackerAction>,
    // Texture init/discard actions replayed by `initialize_texture_memory`.
    texture_memory_actions: CommandBufferTextureMemoryActions,
}
|
||||
|
||||
// Newtype errors carrying the id of a resource that was destroyed while a
// command buffer still referenced it.
pub(crate) struct DestroyedBufferError(pub id::BufferId);
pub(crate) struct DestroyedTextureError(pub id::TextureId);
|
||||
|
||||
/// An in-progress command buffer: the encoder being recorded into, plus all
/// the state-tracking and deferred-init bookkeeping accumulated while
/// recording.
pub struct CommandBuffer<A: HalApi> {
    encoder: CommandEncoder<A>,
    status: CommandEncoderStatus,
    pub(crate) device_id: Stored<id::DeviceId>,
    pub(crate) trackers: Tracker<A>,
    // Deferred buffer init work, resolved at submit via BakedCommands.
    buffer_memory_init_actions: Vec<BufferInitTrackerAction>,
    // Deferred texture init/discard work, resolved at submit via BakedCommands.
    texture_memory_actions: CommandBufferTextureMemoryActions,
    limits: wgt::Limits,
    // Whether Features::CLEAR_TEXTURE was enabled on the device.
    support_clear_texture: bool,
    #[cfg(feature = "trace")]
    pub(crate) commands: Option<Vec<TraceCommand>>,
}
|
||||
|
||||
impl<A: HalApi> CommandBuffer<A> {
    /// Creates a fresh command buffer in the `Recording` state.
    pub(crate) fn new(
        encoder: A::CommandEncoder,
        device_id: Stored<id::DeviceId>,
        limits: wgt::Limits,
        _downlevel: wgt::DownlevelCapabilities,
        features: wgt::Features,
        #[cfg(feature = "trace")] enable_tracing: bool,
        label: &Label,
    ) -> Self {
        CommandBuffer {
            encoder: CommandEncoder {
                raw: encoder,
                is_open: false,
                list: Vec::new(),
                label: crate::LabelHelpers::borrow_option(label).map(|s| s.to_string()),
            },
            status: CommandEncoderStatus::Recording,
            device_id,
            trackers: Tracker::new(),
            buffer_memory_init_actions: Default::default(),
            texture_memory_actions: Default::default(),
            limits,
            support_clear_texture: features.contains(wgt::Features::CLEAR_TEXTURE),
            #[cfg(feature = "trace")]
            commands: if enable_tracing {
                Some(Vec::new())
            } else {
                None
            },
        }
    }

    /// Merges `head`'s tracker state into `base` and records the resulting
    /// transition barriers into `raw`.
    pub(crate) fn insert_barriers_from_tracker(
        raw: &mut A::CommandEncoder,
        base: &mut Tracker<A>,
        head: &Tracker<A>,
        buffer_guard: &Storage<Buffer<A>, id::BufferId>,
        texture_guard: &Storage<Texture<A>, id::TextureId>,
    ) {
        profiling::scope!("insert_barriers");

        base.buffers.set_from_tracker(&head.buffers);
        base.textures
            .set_from_tracker(texture_guard, &head.textures);

        Self::drain_barriers(raw, base, buffer_guard, texture_guard);
    }

    /// Same as [`Self::insert_barriers_from_tracker`], but merges from a
    /// `UsageScope` instead of a full tracker.
    pub(crate) fn insert_barriers_from_scope(
        raw: &mut A::CommandEncoder,
        base: &mut Tracker<A>,
        head: &UsageScope<A>,
        buffer_guard: &Storage<Buffer<A>, id::BufferId>,
        texture_guard: &Storage<Texture<A>, id::TextureId>,
    ) {
        profiling::scope!("insert_barriers");

        base.buffers.set_from_usage_scope(&head.buffers);
        base.textures
            .set_from_usage_scope(texture_guard, &head.textures);

        Self::drain_barriers(raw, base, buffer_guard, texture_guard);
    }

    /// Drains the pending transitions from `base` and issues them as hal
    /// buffer/texture barriers on `raw`.
    pub(crate) fn drain_barriers(
        raw: &mut A::CommandEncoder,
        base: &mut Tracker<A>,
        buffer_guard: &Storage<Buffer<A>, id::BufferId>,
        texture_guard: &Storage<Texture<A>, id::TextureId>,
    ) {
        profiling::scope!("drain_barriers");

        // get_unchecked skips validity checks: the tracker only holds ids of
        // resources it has already seen, so they are presumed present —
        // NOTE(review): soundness relies on that tracker invariant.
        let buffer_barriers = base.buffers.drain().map(|pending| {
            let buf = unsafe { &buffer_guard.get_unchecked(pending.id) };
            pending.into_hal(buf)
        });
        let texture_barriers = base.textures.drain().map(|pending| {
            let tex = unsafe { texture_guard.get_unchecked(pending.id) };
            pending.into_hal(tex)
        });

        unsafe {
            raw.transition_buffers(buffer_barriers);
            raw.transition_textures(texture_barriers);
        }
    }
}
|
||||
|
||||
impl<A: HalApi> CommandBuffer<A> {
|
||||
fn get_encoder_mut(
|
||||
storage: &mut Storage<Self, id::CommandEncoderId>,
|
||||
id: id::CommandEncoderId,
|
||||
) -> Result<&mut Self, CommandEncoderError> {
|
||||
match storage.get_mut(id) {
|
||||
Ok(cmd_buf) => match cmd_buf.status {
|
||||
CommandEncoderStatus::Recording => Ok(cmd_buf),
|
||||
CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording),
|
||||
CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
|
||||
},
|
||||
Err(_) => Err(CommandEncoderError::Invalid),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_finished(&self) -> bool {
|
||||
match self.status {
|
||||
CommandEncoderStatus::Finished => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn into_baked(self) -> BakedCommands<A> {
|
||||
BakedCommands {
|
||||
encoder: self.encoder.raw,
|
||||
list: self.encoder.list,
|
||||
trackers: self.trackers,
|
||||
buffer_memory_init_actions: self.buffer_memory_init_actions,
|
||||
texture_memory_actions: self.texture_memory_actions,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A: HalApi> crate::resource::Resource for CommandBuffer<A> {
    const TYPE: &'static str = "CommandBuffer";

    /// Command buffers are not lifetime-tracked like other resources, so this
    /// accessor must never be called.
    fn life_guard(&self) -> &crate::LifeGuard {
        unreachable!()
    }

    /// The user-provided debug label, or "" if none was set.
    fn label(&self) -> &str {
        self.encoder.label.as_ref().map_or("", |s| s.as_str())
    }
}
|
||||
|
||||
/// Borrowed view of a [`BasePass`]'s contents; see `BasePass` for field
/// semantics.
#[derive(Copy, Clone, Debug)]
pub struct BasePassRef<'a, C> {
    pub label: Option<&'a str>,
    pub commands: &'a [C],
    pub dynamic_offsets: &'a [wgt::DynamicOffset],
    pub string_data: &'a [u8],
    pub push_constant_data: &'a [u32],
}
|
||||
|
||||
/// A stream of commands for a render pass or compute pass.
///
/// This also contains side tables referred to by certain commands,
/// like dynamic offsets for [`SetBindGroup`] or string data for
/// [`InsertDebugMarker`].
///
/// Render passes use `BasePass<RenderCommand>`, whereas compute
/// passes use `BasePass<ComputeCommand>`.
///
/// [`SetBindGroup`]: RenderCommand::SetBindGroup
/// [`InsertDebugMarker`]: RenderCommand::InsertDebugMarker
#[doc(hidden)]
#[derive(Debug)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "trace"),
    derive(serde::Serialize)
)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "replay"),
    derive(serde::Deserialize)
)]
pub struct BasePass<C> {
    pub label: Option<String>,

    /// The stream of commands.
    pub commands: Vec<C>,

    /// Dynamic offsets consumed by [`SetBindGroup`] commands in `commands`.
    ///
    /// Each successive `SetBindGroup` consumes the next
    /// [`num_dynamic_offsets`] values from this list.
    pub dynamic_offsets: Vec<wgt::DynamicOffset>,

    /// Strings used by debug instructions.
    ///
    /// Each successive [`PushDebugGroup`] or [`InsertDebugMarker`]
    /// instruction consumes the next `len` bytes from this vector.
    pub string_data: Vec<u8>,

    /// Data used by `SetPushConstant` instructions.
    ///
    /// See the documentation for [`RenderCommand::SetPushConstant`]
    /// and [`ComputeCommand::SetPushConstant`] for details.
    pub push_constant_data: Vec<u32>,
}
|
||||
|
||||
impl<C: Clone> BasePass<C> {
|
||||
fn new(label: &Label) -> Self {
|
||||
Self {
|
||||
label: label.as_ref().map(|cow| cow.to_string()),
|
||||
commands: Vec::new(),
|
||||
dynamic_offsets: Vec::new(),
|
||||
string_data: Vec::new(),
|
||||
push_constant_data: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
fn from_ref(base: BasePassRef<C>) -> Self {
|
||||
Self {
|
||||
label: base.label.map(str::to_string),
|
||||
commands: base.commands.to_vec(),
|
||||
dynamic_offsets: base.dynamic_offsets.to_vec(),
|
||||
string_data: base.string_data.to_vec(),
|
||||
push_constant_data: base.push_constant_data.to_vec(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_ref(&self) -> BasePassRef<C> {
|
||||
BasePassRef {
|
||||
label: self.label.as_deref(),
|
||||
commands: &self.commands,
|
||||
dynamic_offsets: &self.dynamic_offsets,
|
||||
string_data: &self.string_data,
|
||||
push_constant_data: &self.push_constant_data,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors raised when an operation is attempted on a command encoder that is
/// not in a usable state.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CommandEncoderError {
    #[error("Command encoder is invalid")]
    Invalid,
    #[error("Command encoder must be active")]
    NotRecording,
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
    /// Seals recording on `encoder_id`, transitioning it to `Finished`.
    ///
    /// Errors are returned by value (not `Result`) alongside the id, matching
    /// the wgpu-core convention for id-returning entry points.
    pub fn command_encoder_finish<A: HalApi>(
        &self,
        encoder_id: id::CommandEncoderId,
        _desc: &wgt::CommandBufferDescriptor<Label>,
    ) -> (id::CommandBufferId, Option<CommandEncoderError>) {
        profiling::scope!("CommandEncoder::finish");

        let hub = A::hub(self);
        let mut token = Token::root();
        let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);

        let error = match cmd_buf_guard.get_mut(encoder_id) {
            Ok(cmd_buf) => match cmd_buf.status {
                CommandEncoderStatus::Recording => {
                    cmd_buf.encoder.close();
                    cmd_buf.status = CommandEncoderStatus::Finished;
                    //Note: if we want to stop tracking the swapchain texture view,
                    // this is the place to do it.
                    log::trace!("Command buffer {:?}", encoder_id);
                    None
                }
                CommandEncoderStatus::Finished => Some(CommandEncoderError::NotRecording),
                CommandEncoderStatus::Error => {
                    // Poisoned encoder: drop whatever was recorded.
                    cmd_buf.encoder.discard();
                    Some(CommandEncoderError::Invalid)
                }
            },
            Err(_) => Some(CommandEncoderError::Invalid),
        };

        (encoder_id, error)
    }

    /// Opens a debug group with `label` on the encoder.
    pub fn command_encoder_push_debug_group<A: HalApi>(
        &self,
        encoder_id: id::CommandEncoderId,
        label: &str,
    ) -> Result<(), CommandEncoderError> {
        profiling::scope!("CommandEncoder::push_debug_group");

        let hub = A::hub(self);
        let mut token = Token::root();

        let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
        let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?;

        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.commands {
            list.push(TraceCommand::PushDebugGroup(label.to_string()));
        }

        let cmd_buf_raw = cmd_buf.encoder.open();
        unsafe {
            cmd_buf_raw.begin_debug_marker(label);
        }
        Ok(())
    }

    /// Inserts a standalone debug marker with `label` on the encoder.
    pub fn command_encoder_insert_debug_marker<A: HalApi>(
        &self,
        encoder_id: id::CommandEncoderId,
        label: &str,
    ) -> Result<(), CommandEncoderError> {
        profiling::scope!("CommandEncoder::insert_debug_marker");

        let hub = A::hub(self);
        let mut token = Token::root();

        let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
        let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?;

        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.commands {
            list.push(TraceCommand::InsertDebugMarker(label.to_string()));
        }

        let cmd_buf_raw = cmd_buf.encoder.open();
        unsafe {
            cmd_buf_raw.insert_debug_marker(label);
        }
        Ok(())
    }

    /// Closes the innermost open debug group on the encoder.
    ///
    /// NOTE(review): unbalanced pops are not validated here — presumably the
    /// hal/driver layer handles that; confirm.
    pub fn command_encoder_pop_debug_group<A: HalApi>(
        &self,
        encoder_id: id::CommandEncoderId,
    ) -> Result<(), CommandEncoderError> {
        profiling::scope!("CommandEncoder::pop_debug_marker");

        let hub = A::hub(self);
        let mut token = Token::root();

        let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
        let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?;

        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.commands {
            list.push(TraceCommand::PopDebugGroup);
        }

        let cmd_buf_raw = cmd_buf.encoder.open();
        unsafe {
            cmd_buf_raw.end_debug_marker();
        }
        Ok(())
    }
}
|
||||
|
||||
fn push_constant_clear<PushFn>(offset: u32, size_bytes: u32, mut push_fn: PushFn)
|
||||
where
|
||||
PushFn: FnMut(u32, &[u32]),
|
||||
{
|
||||
let mut count_words = 0_u32;
|
||||
let size_words = size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT;
|
||||
while count_words < size_words {
|
||||
let count_bytes = count_words * wgt::PUSH_CONSTANT_ALIGNMENT;
|
||||
let size_to_write_words =
|
||||
(size_words - count_words).min(PUSH_CONSTANT_CLEAR_ARRAY.len() as u32);
|
||||
|
||||
push_fn(
|
||||
offset + count_bytes,
|
||||
&PUSH_CONSTANT_CLEAR_ARRAY[0..size_to_write_words as usize],
|
||||
);
|
||||
|
||||
count_words += size_to_write_words;
|
||||
}
|
||||
}
|
||||
|
||||
/// Remembers the last value set through it so redundant state changes can be
/// skipped.
#[derive(Debug, Copy, Clone)]
struct StateChange<T> {
    last_state: Option<T>,
}

impl<T: Copy + PartialEq> StateChange<T> {
    /// Starts with no recorded state, so the first `set` is never redundant.
    fn new() -> Self {
        Self { last_state: None }
    }

    /// Records `new_state` and reports whether it was already the current
    /// state (i.e. the change is redundant).
    fn set_and_check_redundant(&mut self, new_state: T) -> bool {
        let redundant = self
            .last_state
            .map_or(false, |previous| previous == new_state);
        self.last_state = Some(new_state);
        redundant
    }

    /// Forgets the recorded state, forcing the next `set` to be non-redundant.
    fn reset(&mut self) {
        self.last_state = None;
    }
}

impl<T: Copy + PartialEq> Default for StateChange<T> {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
/// Per-slot redundancy tracking for bind group bindings, one
/// [`StateChange`] per bind group index.
#[derive(Debug)]
struct BindGroupStateChange {
    last_states: [StateChange<id::BindGroupId>; hal::MAX_BIND_GROUPS],
}
|
||||
|
||||
impl BindGroupStateChange {
    /// All slots start with no recorded bind group.
    fn new() -> Self {
        Self {
            last_states: [StateChange::new(); hal::MAX_BIND_GROUPS],
        }
    }

    /// Records a set_bind_group call and reports whether it is redundant
    /// (same group, same slot, no dynamic offsets) and may be skipped.
    ///
    /// # Safety
    /// When `offset_length` is non-zero, `offsets` must be valid for reading
    /// `offset_length` `DynamicOffset` values.
    unsafe fn set_and_check_redundant(
        &mut self,
        bind_group_id: id::BindGroupId,
        index: u32,
        dynamic_offsets: &mut Vec<u32>,
        offsets: *const wgt::DynamicOffset,
        offset_length: usize,
    ) -> bool {
        // For now never deduplicate bind groups with dynamic offsets.
        if offset_length == 0 {
            // If this get returns None, that means we're well over the limit,
            // so let the call through to get a proper error
            if let Some(current_bind_group) = self.last_states.get_mut(index as usize) {
                // Bail out if we're binding the same bind group.
                if current_bind_group.set_and_check_redundant(bind_group_id) {
                    return true;
                }
            }
        } else {
            // We intentionally remove the memory of this bind group if we have dynamic offsets,
            // such that if you try to bind this bind group later with _no_ dynamic offsets it
            // tries to bind it again and gives a proper validation error.
            if let Some(current_bind_group) = self.last_states.get_mut(index as usize) {
                current_bind_group.reset();
            }
            // SAFETY: caller guarantees `offsets` is valid for `offset_length`
            // reads (see the function's safety contract).
            dynamic_offsets
                .extend_from_slice(unsafe { slice::from_raw_parts(offsets, offset_length) });
        }
        false
    }

    /// Clears every slot's memory, e.g. at pass boundaries.
    fn reset(&mut self) {
        self.last_states = [StateChange::new(); hal::MAX_BIND_GROUPS];
    }
}
|
||||
|
||||
impl Default for BindGroupStateChange {
    /// Equivalent to [`BindGroupStateChange::new`].
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
/// Attaches a [`PassErrorScope`] to an error, converting it into the pass-level
/// error type `O`.
trait MapPassErr<T, O> {
    fn map_pass_err(self, scope: PassErrorScope) -> Result<T, O>;
}
|
||||
/// Identifies which pass-recording command an error occurred in; rendered as
/// a context line in pretty-printed error chains.
#[derive(Clone, Copy, Debug, Error)]
pub enum PassErrorScope {
    #[error("In a bundle parameter")]
    Bundle,
    #[error("In a pass parameter")]
    Pass(id::CommandEncoderId),
    #[error("In a set_bind_group command")]
    SetBindGroup(id::BindGroupId),
    #[error("In a set_pipeline command")]
    SetPipelineRender(id::RenderPipelineId),
    #[error("In a set_pipeline command")]
    SetPipelineCompute(id::ComputePipelineId),
    #[error("In a set_push_constant command")]
    SetPushConstant,
    #[error("In a set_vertex_buffer command")]
    SetVertexBuffer(id::BufferId),
    #[error("In a set_index_buffer command")]
    SetIndexBuffer(id::BufferId),
    #[error("In a set_viewport command")]
    SetViewport,
    #[error("In a set_scissor_rect command")]
    SetScissorRect,
    #[error("In a draw command, indexed:{indexed} indirect:{indirect}")]
    Draw {
        indexed: bool,
        indirect: bool,
        // The currently bound pipeline, if any, for label lookup.
        pipeline: Option<id::RenderPipelineId>,
    },
    #[error("While resetting queries after the renderpass was ran")]
    QueryReset,
    #[error("In a write_timestamp command")]
    WriteTimestamp,
    #[error("In a begin_pipeline_statistics_query command")]
    BeginPipelineStatisticsQuery,
    #[error("In a end_pipeline_statistics_query command")]
    EndPipelineStatisticsQuery,
    #[error("In a execute_bundle command")]
    ExecuteBundle,
    #[error("In a dispatch command, indirect:{indirect}")]
    Dispatch {
        indirect: bool,
        // The currently bound pipeline, if any, for label lookup.
        pipeline: Option<id::ComputePipelineId>,
    },
    #[error("In a pop_debug_group command")]
    PopDebugGroup,
}
|
||||
|
||||
impl PrettyError for PassErrorScope {
    /// Adds resource labels (pipeline, bind group, buffer, …) as notes for the
    /// scopes that carry an id.
    fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
        // This error is not in the error chain, only notes are needed
        match *self {
            Self::Pass(id) => {
                fmt.command_buffer_label(&id);
            }
            Self::SetBindGroup(id) => {
                fmt.bind_group_label(&id);
            }
            Self::SetPipelineRender(id) => {
                fmt.render_pipeline_label(&id);
            }
            Self::SetPipelineCompute(id) => {
                fmt.compute_pipeline_label(&id);
            }
            Self::SetVertexBuffer(id) => {
                fmt.buffer_label(&id);
            }
            Self::SetIndexBuffer(id) => {
                fmt.buffer_label(&id);
            }
            Self::Draw {
                pipeline: Some(id), ..
            } => {
                fmt.render_pipeline_label(&id);
            }
            Self::Dispatch {
                pipeline: Some(id), ..
            } => {
                fmt.compute_pipeline_label(&id);
            }
            // Scopes without an associated resource id contribute no note.
            _ => {}
        }
    }
}
|
||||
435
third-party/vendor/wgpu-core/src/command/query.rs
vendored
Normal file
435
third-party/vendor/wgpu-core/src/command/query.rs
vendored
Normal file
|
|
@ -0,0 +1,435 @@
|
|||
use hal::CommandEncoder as _;
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
use crate::device::trace::Command as TraceCommand;
|
||||
use crate::{
|
||||
command::{CommandBuffer, CommandEncoderError},
|
||||
global::Global,
|
||||
hal_api::HalApi,
|
||||
hub::Token,
|
||||
id::{self, Id, TypedId},
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
init_tracker::MemoryInitKind,
|
||||
resource::QuerySet,
|
||||
storage::Storage,
|
||||
Epoch, FastHashMap, Index,
|
||||
};
|
||||
use std::{iter, marker::PhantomData};
|
||||
use thiserror::Error;
|
||||
use wgt::BufferAddress;
|
||||
|
||||
/// Tracks which individual queries of which query sets have been used in a
/// pass, so they can be batch-reset afterwards.
#[derive(Debug)]
pub(super) struct QueryResetMap<A: hal::Api> {
    // Keyed by query-set index; value is (per-query used flags, set epoch).
    map: FastHashMap<Index, (Vec<bool>, Epoch)>,
    _phantom: PhantomData<A>,
}
||||
impl<A: hal::Api> QueryResetMap<A> {
    /// Creates an empty map.
    pub fn new() -> Self {
        Self {
            map: FastHashMap::default(),
            _phantom: PhantomData,
        }
    }

    /// Marks `query` of `query_set` as used; returns whether it was already
    /// used before this call.
    pub fn use_query_set(
        &mut self,
        id: id::QuerySetId,
        query_set: &QuerySet<A>,
        query: u32,
    ) -> bool {
        let (index, epoch, _) = id.unzip();
        let vec_pair = self
            .map
            .entry(index)
            .or_insert_with(|| (vec![false; query_set.desc.count as usize], epoch));

        std::mem::replace(&mut vec_pair.0[query as usize], true)
    }

    /// Issues hal `reset_queries` calls for every used query, batching
    /// contiguous runs. Returns the offending id if a query set vanished.
    pub fn reset_queries(
        self,
        raw_encoder: &mut A::CommandEncoder,
        query_set_storage: &Storage<QuerySet<A>, id::QuerySetId>,
        backend: wgt::Backend,
    ) -> Result<(), id::QuerySetId> {
        for (query_set_id, (state, epoch)) in self.map.into_iter() {
            let id = Id::zip(query_set_id, epoch, backend);
            let query_set = query_set_storage.get(id).map_err(|_| id)?;

            debug_assert_eq!(state.len(), query_set.desc.count as usize);

            // Need to find all "runs" of values which need resets. If the state vector is:
            // [false, true, true, false, true], we want to reset [1..3, 4..5]. This minimizes
            // the amount of resets needed.
            let mut run_start: Option<u32> = None;
            // The appended `false` sentinel guarantees any trailing run is flushed.
            for (idx, value) in state.into_iter().chain(iter::once(false)).enumerate() {
                match (run_start, value) {
                    // We're inside of a run, do nothing
                    (Some(..), true) => {}
                    // We've hit the end of a run, dispatch a reset
                    (Some(start), false) => {
                        run_start = None;
                        unsafe { raw_encoder.reset_queries(&query_set.raw, start..idx as u32) };
                    }
                    // We're starting a run
                    (None, true) => {
                        run_start = Some(idx as u32);
                    }
                    // We're in a run of falses, do nothing.
                    (None, false) => {}
                }
            }
        }

        Ok(())
    }
}
|
||||
|
||||
/// The broad category of a query, with pipeline-statistics details
/// collapsed so query/query-set compatibility can be compared cheaply.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SimplifiedQueryType {
    Occlusion,
    Timestamp,
    PipelineStatistics,
}
|
||||
impl From<wgt::QueryType> for SimplifiedQueryType {
|
||||
fn from(q: wgt::QueryType) -> Self {
|
||||
match q {
|
||||
wgt::QueryType::Occlusion => SimplifiedQueryType::Occlusion,
|
||||
wgt::QueryType::Timestamp => SimplifiedQueryType::Timestamp,
|
||||
wgt::QueryType::PipelineStatistics(..) => SimplifiedQueryType::PipelineStatistics,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error encountered when dealing with queries
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueryError {
    // The command encoder itself could not be obtained/opened.
    #[error(transparent)]
    Encoder(#[from] CommandEncoderError),
    // Wraps the detailed per-query validation errors.
    #[error("Error encountered while trying to use queries")]
    Use(#[from] QueryUseError),
    // Wraps the detailed resolve-to-buffer validation errors.
    #[error("Error encountered while trying to resolve a query")]
    Resolve(#[from] ResolveError),
    #[error("Buffer {0:?} is invalid or destroyed")]
    InvalidBuffer(id::BufferId),
    #[error("QuerySet {0:?} is invalid or destroyed")]
    InvalidQuerySet(id::QuerySetId),
}
|
||||
|
||||
impl crate::error::PrettyError for QueryError {
|
||||
fn fmt_pretty(&self, fmt: &mut crate::error::ErrorFormatter) {
|
||||
fmt.error(self);
|
||||
match *self {
|
||||
Self::InvalidBuffer(id) => fmt.buffer_label(&id),
|
||||
Self::InvalidQuerySet(id) => fmt.query_set_label(&id),
|
||||
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error encountered while trying to use queries
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueryUseError {
    #[error("Query {query_index} is out of bounds for a query set of size {query_set_size}")]
    OutOfBounds {
        query_index: u32,
        query_set_size: u32,
    },
    // Raised from the deferred-reset tracking in `QueryResetMap::use_query_set`.
    #[error("Query {query_index} has already been used within the same renderpass. Queries must only be used once per renderpass")]
    UsedTwiceInsideRenderpass { query_index: u32 },
    // Only one statistics/occlusion query may be open at a time.
    #[error("Query {new_query_index} was started while query {active_query_index} was already active. No more than one statistic or occlusion query may be active at once")]
    AlreadyStarted {
        active_query_index: u32,
        new_query_index: u32,
    },
    #[error("Query was stopped while there was no active query")]
    AlreadyStopped,
    // The query set's type does not match the kind of query being issued.
    #[error("A query of type {query_type:?} was started using a query set of type {set_type:?}")]
    IncompatibleType {
        set_type: SimplifiedQueryType,
        query_type: SimplifiedQueryType,
    },
}
|
||||
|
||||
/// Error encountered while trying to resolve a query.
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum ResolveError {
|
||||
#[error("Queries can only be resolved to buffers that contain the QUERY_RESOLVE usage")]
|
||||
MissingBufferUsage,
|
||||
#[error("Resolve buffer offset has to be aligned to `QUERY_RESOLVE_BUFFER_ALIGNMENT")]
|
||||
BufferOffsetAlignment,
|
||||
#[error("Resolving queries {start_query}..{end_query} would overrun the query set of size {query_set_size}")]
|
||||
QueryOverrun {
|
||||
start_query: u32,
|
||||
end_query: u32,
|
||||
query_set_size: u32,
|
||||
},
|
||||
#[error("Resolving queries {start_query}..{end_query} ({stride} byte queries) will end up overrunning the bounds of the destination buffer of size {buffer_size} using offsets {buffer_start_offset}..{buffer_end_offset}")]
|
||||
BufferOverrun {
|
||||
start_query: u32,
|
||||
end_query: u32,
|
||||
stride: u32,
|
||||
buffer_size: BufferAddress,
|
||||
buffer_start_offset: BufferAddress,
|
||||
buffer_end_offset: BufferAddress,
|
||||
},
|
||||
}
|
||||
|
||||
impl<A: HalApi> QuerySet<A> {
    /// Validate that `query_index` may be used on this query set for a query
    /// of category `query_type`, recording the use in `reset_state` when
    /// resets are being deferred (i.e. inside a renderpass).
    ///
    /// Returns the raw hal query set on success.
    fn validate_query(
        &self,
        query_set_id: id::QuerySetId,
        query_type: SimplifiedQueryType,
        query_index: u32,
        reset_state: Option<&mut QueryResetMap<A>>,
    ) -> Result<&A::QuerySet, QueryUseError> {
        // We need to defer our resets because we are in a renderpass,
        // add the usage to the reset map.
        if let Some(reset) = reset_state {
            let used = reset.use_query_set(query_set_id, self, query_index);
            if used {
                return Err(QueryUseError::UsedTwiceInsideRenderpass { query_index });
            }
        }

        // The set's (simplified) type must match the category of query issued.
        let simple_set_type = SimplifiedQueryType::from(self.desc.ty);
        if simple_set_type != query_type {
            return Err(QueryUseError::IncompatibleType {
                query_type,
                set_type: simple_set_type,
            });
        }

        // Bounds check against the set's declared query count.
        if query_index >= self.desc.count {
            return Err(QueryUseError::OutOfBounds {
                query_index,
                query_set_size: self.desc.count,
            });
        }

        Ok(&self.raw)
    }

    /// Validate `query_index` as a timestamp query and write the timestamp.
    ///
    /// When `reset_state` is `None` there is no tracker to defer resets to,
    /// so the single query slot is reset immediately before being written.
    pub(super) fn validate_and_write_timestamp(
        &self,
        raw_encoder: &mut A::CommandEncoder,
        query_set_id: id::QuerySetId,
        query_index: u32,
        reset_state: Option<&mut QueryResetMap<A>>,
    ) -> Result<(), QueryUseError> {
        let needs_reset = reset_state.is_none();
        let query_set = self.validate_query(
            query_set_id,
            SimplifiedQueryType::Timestamp,
            query_index,
            reset_state,
        )?;

        unsafe {
            // If we don't have a reset state tracker which can defer resets, we must reset now.
            if needs_reset {
                raw_encoder.reset_queries(&self.raw, query_index..(query_index + 1));
            }
            raw_encoder.write_timestamp(query_set, query_index);
        }

        Ok(())
    }

    /// Validate `query_index` as a pipeline-statistics query, begin it, and
    /// record it as the active query.
    ///
    /// Fails with `AlreadyStarted` if another statistics query was already
    /// active — at most one may run at a time. Note that `replace` installs
    /// the new query before the conflict is reported.
    pub(super) fn validate_and_begin_pipeline_statistics_query(
        &self,
        raw_encoder: &mut A::CommandEncoder,
        query_set_id: id::QuerySetId,
        query_index: u32,
        reset_state: Option<&mut QueryResetMap<A>>,
        active_query: &mut Option<(id::QuerySetId, u32)>,
    ) -> Result<(), QueryUseError> {
        let needs_reset = reset_state.is_none();
        let query_set = self.validate_query(
            query_set_id,
            SimplifiedQueryType::PipelineStatistics,
            query_index,
            reset_state,
        )?;

        if let Some((_old_id, old_idx)) = active_query.replace((query_set_id, query_index)) {
            return Err(QueryUseError::AlreadyStarted {
                active_query_index: old_idx,
                new_query_index: query_index,
            });
        }

        unsafe {
            // If we don't have a reset state tracker which can defer resets, we must reset now.
            if needs_reset {
                raw_encoder.reset_queries(&self.raw, query_index..(query_index + 1));
            }
            raw_encoder.begin_query(query_set, query_index);
        }

        Ok(())
    }
}
|
||||
|
||||
pub(super) fn end_pipeline_statistics_query<A: HalApi>(
|
||||
raw_encoder: &mut A::CommandEncoder,
|
||||
storage: &Storage<QuerySet<A>, id::QuerySetId>,
|
||||
active_query: &mut Option<(id::QuerySetId, u32)>,
|
||||
) -> Result<(), QueryUseError> {
|
||||
if let Some((query_set_id, query_index)) = active_query.take() {
|
||||
// We can unwrap here as the validity was validated when the active query was set
|
||||
let query_set = storage.get(query_set_id).unwrap();
|
||||
|
||||
unsafe { raw_encoder.end_query(&query_set.raw, query_index) };
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
Err(QueryUseError::AlreadyStopped)
|
||||
}
|
||||
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
    /// Record a `write_timestamp` command on the given command encoder.
    ///
    /// Looks up the encoder and query set, validates the query index, and
    /// writes the timestamp. Outside of a pass there is no deferred-reset
    /// tracker, so the slot is reset immediately (`reset_state = None`).
    pub fn command_encoder_write_timestamp<A: HalApi>(
        &self,
        command_encoder_id: id::CommandEncoderId,
        query_set_id: id::QuerySetId,
        query_index: u32,
    ) -> Result<(), QueryError> {
        let hub = A::hub(self);
        let mut token = Token::root();

        // Lock order: command buffers (write) before query sets (read).
        let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
        let (query_set_guard, _) = hub.query_sets.read(&mut token);

        let cmd_buf = CommandBuffer::get_encoder_mut(&mut cmd_buf_guard, command_encoder_id)?;
        let raw_encoder = cmd_buf.encoder.open();

        // Record the command for API tracing, if enabled.
        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.commands {
            list.push(TraceCommand::WriteTimestamp {
                query_set_id,
                query_index,
            });
        }

        // Register the query set with this command buffer's trackers so it
        // stays alive; failure means the id is invalid or destroyed.
        let query_set = cmd_buf
            .trackers
            .query_sets
            .add_single(&*query_set_guard, query_set_id)
            .ok_or(QueryError::InvalidQuerySet(query_set_id))?;

        query_set.validate_and_write_timestamp(raw_encoder, query_set_id, query_index, None)?;

        Ok(())
    }

    /// Record a `resolve_query_set` command: copy the results of queries
    /// `start_query..start_query + query_count` into `destination` starting
    /// at `destination_offset`, after validating offset alignment, buffer
    /// usage, query-set bounds, and destination-buffer bounds.
    pub fn command_encoder_resolve_query_set<A: HalApi>(
        &self,
        command_encoder_id: id::CommandEncoderId,
        query_set_id: id::QuerySetId,
        start_query: u32,
        query_count: u32,
        destination: id::BufferId,
        destination_offset: BufferAddress,
    ) -> Result<(), QueryError> {
        let hub = A::hub(self);
        let mut token = Token::root();

        // Lock order: command buffers, then query sets, then buffers.
        let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
        let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
        let (buffer_guard, _) = hub.buffers.read(&mut token);

        let cmd_buf = CommandBuffer::get_encoder_mut(&mut cmd_buf_guard, command_encoder_id)?;
        let raw_encoder = cmd_buf.encoder.open();

        // Record the command for API tracing, if enabled.
        #[cfg(feature = "trace")]
        if let Some(ref mut list) = cmd_buf.commands {
            list.push(TraceCommand::ResolveQuerySet {
                query_set_id,
                start_query,
                query_count,
                destination,
                destination_offset,
            });
        }

        // The destination offset must be aligned to the spec-mandated value.
        if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 {
            return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment));
        }

        let query_set = cmd_buf
            .trackers
            .query_sets
            .add_single(&*query_set_guard, query_set_id)
            .ok_or(QueryError::InvalidQuerySet(query_set_id))?;

        // Transition the destination buffer to COPY_DST; the returned
        // pending state becomes the barrier issued below.
        let (dst_buffer, dst_pending) = cmd_buf
            .trackers
            .buffers
            .set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST)
            .ok_or(QueryError::InvalidBuffer(destination))?;
        let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));

        if !dst_buffer.usage.contains(wgt::BufferUsages::QUERY_RESOLVE) {
            return Err(ResolveError::MissingBufferUsage.into());
        }

        // NOTE(review): `start_query + query_count` can overflow u32 for
        // adversarial inputs (panics in debug, wraps in release) — confirm
        // whether checked arithmetic is wanted here.
        let end_query = start_query + query_count;
        if end_query > query_set.desc.count {
            return Err(ResolveError::QueryOverrun {
                start_query,
                end_query,
                query_set_size: query_set.desc.count,
            }
            .into());
        }

        // Each query result is one element per enabled statistic (or one
        // element total for occlusion/timestamp), `QUERY_SIZE` bytes each.
        let elements_per_query = match query_set.desc.ty {
            wgt::QueryType::Occlusion => 1,
            wgt::QueryType::PipelineStatistics(ps) => ps.bits().count_ones(),
            wgt::QueryType::Timestamp => 1,
        };
        let stride = elements_per_query * wgt::QUERY_SIZE;
        let bytes_used = (stride * query_count) as BufferAddress;

        let buffer_start_offset = destination_offset;
        let buffer_end_offset = buffer_start_offset + bytes_used;

        if buffer_end_offset > dst_buffer.size {
            return Err(ResolveError::BufferOverrun {
                start_query,
                end_query,
                stride,
                buffer_size: dst_buffer.size,
                buffer_start_offset,
                buffer_end_offset,
            }
            .into());
        }

        // The copy fully overwrites this range, so mark it as implicitly
        // initialized rather than requiring a zeroing pass.
        cmd_buf
            .buffer_memory_init_actions
            .extend(dst_buffer.initialization_status.create_action(
                destination,
                buffer_start_offset..buffer_end_offset,
                MemoryInitKind::ImplicitlyInitialized,
            ));

        unsafe {
            raw_encoder.transition_buffers(dst_barrier.into_iter());
            // NOTE(review): `new_unchecked` assumes `stride` is nonzero;
            // a PipelineStatistics set with an empty flag set would violate
            // that — presumably rejected at query-set creation; confirm.
            raw_encoder.copy_query_results(
                &query_set.raw,
                start_query..end_query,
                dst_buffer.raw.as_ref().unwrap(),
                destination_offset,
                wgt::BufferSize::new_unchecked(stride as u64),
            );
        }

        Ok(())
    }
}
|
||||
2588
third-party/vendor/wgpu-core/src/command/render.rs
vendored
Normal file
2588
third-party/vendor/wgpu-core/src/command/render.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
1158
third-party/vendor/wgpu-core/src/command/transfer.rs
vendored
Normal file
1158
third-party/vendor/wgpu-core/src/command/transfer.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
218
third-party/vendor/wgpu-core/src/conv.rs
vendored
Normal file
218
third-party/vendor/wgpu-core/src/conv.rs
vendored
Normal file
|
|
@ -0,0 +1,218 @@
|
|||
use crate::resource;
|
||||
|
||||
/// Returns `true` iff `val` is a power of two. Zero is not a power of two.
pub fn is_power_of_two_u16(val: u16) -> bool {
    // Delegate to the standard library instead of hand-rolling the
    // `val != 0 && (val & (val - 1)) == 0` bit trick; the semantics are
    // identical (0 -> false).
    val.is_power_of_two()
}
|
||||
|
||||
/// Returns `true` iff `val` is a power of two. Zero is not a power of two.
pub fn is_power_of_two_u32(val: u32) -> bool {
    // Delegate to the standard library instead of hand-rolling the
    // `val != 0 && (val & (val - 1)) == 0` bit trick; the semantics are
    // identical (0 -> false).
    val.is_power_of_two()
}
|
||||
|
||||
pub fn is_valid_copy_src_texture_format(
|
||||
format: wgt::TextureFormat,
|
||||
aspect: wgt::TextureAspect,
|
||||
) -> bool {
|
||||
use wgt::TextureAspect as Ta;
|
||||
use wgt::TextureFormat as Tf;
|
||||
match (format, aspect) {
|
||||
(Tf::Depth24Plus, _) | (Tf::Depth24PlusStencil8, Ta::DepthOnly) => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_valid_copy_dst_texture_format(
|
||||
format: wgt::TextureFormat,
|
||||
aspect: wgt::TextureAspect,
|
||||
) -> bool {
|
||||
use wgt::TextureAspect as Ta;
|
||||
use wgt::TextureFormat as Tf;
|
||||
match (format, aspect) {
|
||||
(Tf::Depth24Plus | Tf::Depth32Float, _)
|
||||
| (Tf::Depth24PlusStencil8 | Tf::Depth32FloatStencil8, Ta::DepthOnly) => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg_attr(
|
||||
any(not(target_arch = "wasm32"), target_os = "emscripten"),
|
||||
allow(unused)
|
||||
)]
|
||||
pub fn is_valid_external_image_copy_dst_texture_format(format: wgt::TextureFormat) -> bool {
|
||||
use wgt::TextureFormat as Tf;
|
||||
match format {
|
||||
Tf::R8Unorm
|
||||
| Tf::R16Float
|
||||
| Tf::R32Float
|
||||
| Tf::Rg8Unorm
|
||||
| Tf::Rg16Float
|
||||
| Tf::Rg32Float
|
||||
| Tf::Rgba8Unorm
|
||||
| Tf::Rgba8UnormSrgb
|
||||
| Tf::Bgra8Unorm
|
||||
| Tf::Bgra8UnormSrgb
|
||||
| Tf::Rgb10a2Unorm
|
||||
| Tf::Rgba16Float
|
||||
| Tf::Rgba32Float => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn map_buffer_usage(usage: wgt::BufferUsages) -> hal::BufferUses {
|
||||
let mut u = hal::BufferUses::empty();
|
||||
u.set(
|
||||
hal::BufferUses::MAP_READ,
|
||||
usage.contains(wgt::BufferUsages::MAP_READ),
|
||||
);
|
||||
u.set(
|
||||
hal::BufferUses::MAP_WRITE,
|
||||
usage.contains(wgt::BufferUsages::MAP_WRITE),
|
||||
);
|
||||
u.set(
|
||||
hal::BufferUses::COPY_SRC,
|
||||
usage.contains(wgt::BufferUsages::COPY_SRC),
|
||||
);
|
||||
u.set(
|
||||
hal::BufferUses::COPY_DST,
|
||||
usage.contains(wgt::BufferUsages::COPY_DST),
|
||||
);
|
||||
u.set(
|
||||
hal::BufferUses::INDEX,
|
||||
usage.contains(wgt::BufferUsages::INDEX),
|
||||
);
|
||||
u.set(
|
||||
hal::BufferUses::VERTEX,
|
||||
usage.contains(wgt::BufferUsages::VERTEX),
|
||||
);
|
||||
u.set(
|
||||
hal::BufferUses::UNIFORM,
|
||||
usage.contains(wgt::BufferUsages::UNIFORM),
|
||||
);
|
||||
u.set(
|
||||
hal::BufferUses::STORAGE_READ | hal::BufferUses::STORAGE_READ_WRITE,
|
||||
usage.contains(wgt::BufferUsages::STORAGE),
|
||||
);
|
||||
u.set(
|
||||
hal::BufferUses::INDIRECT,
|
||||
usage.contains(wgt::BufferUsages::INDIRECT),
|
||||
);
|
||||
u
|
||||
}
|
||||
|
||||
pub fn map_texture_usage(
|
||||
usage: wgt::TextureUsages,
|
||||
aspect: hal::FormatAspects,
|
||||
) -> hal::TextureUses {
|
||||
let mut u = hal::TextureUses::empty();
|
||||
u.set(
|
||||
hal::TextureUses::COPY_SRC,
|
||||
usage.contains(wgt::TextureUsages::COPY_SRC),
|
||||
);
|
||||
u.set(
|
||||
hal::TextureUses::COPY_DST,
|
||||
usage.contains(wgt::TextureUsages::COPY_DST),
|
||||
);
|
||||
u.set(
|
||||
hal::TextureUses::RESOURCE,
|
||||
usage.contains(wgt::TextureUsages::TEXTURE_BINDING),
|
||||
);
|
||||
u.set(
|
||||
hal::TextureUses::STORAGE_READ | hal::TextureUses::STORAGE_READ_WRITE,
|
||||
usage.contains(wgt::TextureUsages::STORAGE_BINDING),
|
||||
);
|
||||
let is_color = aspect.contains(hal::FormatAspects::COLOR);
|
||||
u.set(
|
||||
hal::TextureUses::COLOR_TARGET,
|
||||
usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) && is_color,
|
||||
);
|
||||
u.set(
|
||||
hal::TextureUses::DEPTH_STENCIL_READ | hal::TextureUses::DEPTH_STENCIL_WRITE,
|
||||
usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) && !is_color,
|
||||
);
|
||||
u
|
||||
}
|
||||
|
||||
pub fn map_texture_usage_from_hal(uses: hal::TextureUses) -> wgt::TextureUsages {
|
||||
let mut u = wgt::TextureUsages::empty();
|
||||
u.set(
|
||||
wgt::TextureUsages::COPY_SRC,
|
||||
uses.contains(hal::TextureUses::COPY_SRC),
|
||||
);
|
||||
u.set(
|
||||
wgt::TextureUsages::COPY_DST,
|
||||
uses.contains(hal::TextureUses::COPY_DST),
|
||||
);
|
||||
u.set(
|
||||
wgt::TextureUsages::TEXTURE_BINDING,
|
||||
uses.contains(hal::TextureUses::RESOURCE),
|
||||
);
|
||||
u.set(
|
||||
wgt::TextureUsages::STORAGE_BINDING,
|
||||
uses.contains(hal::TextureUses::STORAGE_READ | hal::TextureUses::STORAGE_READ_WRITE),
|
||||
);
|
||||
u.set(
|
||||
wgt::TextureUsages::RENDER_ATTACHMENT,
|
||||
uses.contains(hal::TextureUses::COLOR_TARGET),
|
||||
);
|
||||
u
|
||||
}
|
||||
|
||||
/// Validate a texture's extent and sample count against the device limits.
///
/// Each axis is checked for zero and for exceeding its per-dimension limit;
/// the sample count must be a nonzero power of two no larger than the
/// dimension's maximum (32 for 2D textures, 1 for 1D and 3D).
pub fn check_texture_dimension_size(
    dimension: wgt::TextureDimension,
    wgt::Extent3d {
        width,
        height,
        depth_or_array_layers,
    }: wgt::Extent3d,
    sample_size: u32,
    limits: &wgt::Limits,
) -> Result<(), resource::TextureDimensionError> {
    use resource::{TextureDimensionError as Tde, TextureErrorDimension as Ted};
    use wgt::TextureDimension::*;

    // Per-axis extent limits [x, y, z] and the max sample count for this
    // texture dimension.
    let (extent_limits, sample_limit) = match dimension {
        D1 => ([limits.max_texture_dimension_1d, 1, 1], 1),
        D2 => (
            [
                limits.max_texture_dimension_2d,
                limits.max_texture_dimension_2d,
                limits.max_texture_array_layers,
            ],
            32,
        ),
        D3 => (
            [
                limits.max_texture_dimension_3d,
                limits.max_texture_dimension_3d,
                limits.max_texture_dimension_3d,
            ],
            1,
        ),
    };

    // Walk (axis tag, (given size, limit)) triples for X, Y and Z.
    for (&dim, (&given, &limit)) in [Ted::X, Ted::Y, Ted::Z].iter().zip(
        [width, height, depth_or_array_layers]
            .iter()
            .zip(extent_limits.iter()),
    ) {
        if given == 0 {
            return Err(Tde::Zero(dim));
        }
        if given > limit {
            return Err(Tde::LimitExceeded { dim, given, limit });
        }
    }
    if sample_size == 0 || sample_size > sample_limit || !is_power_of_two_u32(sample_size) {
        return Err(Tde::InvalidSampleCount(sample_size));
    }

    Ok(())
}
|
||||
|
||||
/// Derive hal bind-group-layout creation flags from the enabled device
/// features.
pub fn bind_group_layout_flags(features: wgt::Features) -> hal::BindGroupLayoutFlags {
    let mut flags = hal::BindGroupLayoutFlags::empty();
    if features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY) {
        flags |= hal::BindGroupLayoutFlags::PARTIALLY_BOUND;
    }
    flags
}
|
||||
2750
third-party/vendor/wgpu-core/src/device/global.rs
vendored
Normal file
2750
third-party/vendor/wgpu-core/src/device/global.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
914
third-party/vendor/wgpu-core/src/device/life.rs
vendored
Normal file
914
third-party/vendor/wgpu-core/src/device/life.rs
vendored
Normal file
|
|
@ -0,0 +1,914 @@
|
|||
#[cfg(feature = "trace")]
|
||||
use crate::device::trace;
|
||||
use crate::{
|
||||
device::{
|
||||
queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
|
||||
DeviceError,
|
||||
},
|
||||
hal_api::HalApi,
|
||||
hub::{Hub, Token},
|
||||
id,
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
resource,
|
||||
track::{BindGroupStates, RenderBundleScope, Tracker},
|
||||
RefCount, Stored, SubmissionIndex,
|
||||
};
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use hal::Device as _;
|
||||
use parking_lot::Mutex;
|
||||
use thiserror::Error;
|
||||
|
||||
use std::mem;
|
||||
|
||||
/// A struct that keeps lists of resources that are no longer needed by the user.
///
/// One list per resource kind; ids accumulate here when user handles die
/// and are later triaged by the lifetime tracker.
#[derive(Debug, Default)]
pub(super) struct SuspectedResources {
    pub(super) buffers: Vec<id::Valid<id::BufferId>>,
    pub(super) textures: Vec<id::Valid<id::TextureId>>,
    pub(super) texture_views: Vec<id::Valid<id::TextureViewId>>,
    pub(super) samplers: Vec<id::Valid<id::SamplerId>>,
    pub(super) bind_groups: Vec<id::Valid<id::BindGroupId>>,
    pub(super) compute_pipelines: Vec<id::Valid<id::ComputePipelineId>>,
    pub(super) render_pipelines: Vec<id::Valid<id::RenderPipelineId>>,
    pub(super) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>,
    // Unlike the others, pipeline layouts are stored with their ref-count
    // (`Stored` rather than a bare valid id).
    pub(super) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
    pub(super) render_bundles: Vec<id::Valid<id::RenderBundleId>>,
    pub(super) query_sets: Vec<id::Valid<id::QuerySetId>>,
}
|
||||
|
||||
impl SuspectedResources {
    /// Empty every list, keeping the allocations for reuse.
    pub(super) fn clear(&mut self) {
        self.buffers.clear();
        self.textures.clear();
        self.texture_views.clear();
        self.samplers.clear();
        self.bind_groups.clear();
        self.compute_pipelines.clear();
        self.render_pipelines.clear();
        self.bind_group_layouts.clear();
        self.pipeline_layouts.clear();
        self.render_bundles.clear();
        self.query_sets.clear();
    }

    /// Append every id from `other` into the corresponding list of `self`.
    pub(super) fn extend(&mut self, other: &Self) {
        self.buffers.extend_from_slice(&other.buffers);
        self.textures.extend_from_slice(&other.textures);
        self.texture_views.extend_from_slice(&other.texture_views);
        self.samplers.extend_from_slice(&other.samplers);
        self.bind_groups.extend_from_slice(&other.bind_groups);
        self.compute_pipelines
            .extend_from_slice(&other.compute_pipelines);
        self.render_pipelines
            .extend_from_slice(&other.render_pipelines);
        self.bind_group_layouts
            .extend_from_slice(&other.bind_group_layouts);
        self.pipeline_layouts
            .extend_from_slice(&other.pipeline_layouts);
        self.render_bundles.extend_from_slice(&other.render_bundles);
        self.query_sets.extend_from_slice(&other.query_sets);
    }

    /// Mark every resource used by a render bundle's tracking scope as
    /// suspected. Only the resource kinds a bundle can reference are added.
    pub(super) fn add_render_bundle_scope<A: HalApi>(&mut self, trackers: &RenderBundleScope<A>) {
        self.buffers.extend(trackers.buffers.used());
        self.textures.extend(trackers.textures.used());
        self.bind_groups.extend(trackers.bind_groups.used());
        self.render_pipelines
            .extend(trackers.render_pipelines.used());
        self.query_sets.extend(trackers.query_sets.used());
    }

    /// Mark every resource held by a bind group's state as suspected.
    pub(super) fn add_bind_group_states<A: HalApi>(&mut self, trackers: &BindGroupStates<A>) {
        self.buffers.extend(trackers.buffers.used());
        self.textures.extend(trackers.textures.used());
        self.texture_views.extend(trackers.views.used());
        self.samplers.extend(trackers.samplers.used());
    }
}
|
||||
|
||||
/// Raw backend resources that should be freed shortly.
///
/// These hold the raw hal objects directly (not ids); they are destroyed
/// through the hal device by `clean`.
#[derive(Debug)]
struct NonReferencedResources<A: hal::Api> {
    buffers: Vec<A::Buffer>,
    textures: Vec<A::Texture>,
    texture_views: Vec<A::TextureView>,
    samplers: Vec<A::Sampler>,
    bind_groups: Vec<A::BindGroup>,
    compute_pipes: Vec<A::ComputePipeline>,
    render_pipes: Vec<A::RenderPipeline>,
    bind_group_layouts: Vec<A::BindGroupLayout>,
    pipeline_layouts: Vec<A::PipelineLayout>,
    query_sets: Vec<A::QuerySet>,
}
|
||||
|
||||
impl<A: hal::Api> NonReferencedResources<A> {
    /// Create a set with no pending resources.
    fn new() -> Self {
        Self {
            buffers: Vec::new(),
            textures: Vec::new(),
            texture_views: Vec::new(),
            samplers: Vec::new(),
            bind_groups: Vec::new(),
            compute_pipes: Vec::new(),
            render_pipes: Vec::new(),
            bind_group_layouts: Vec::new(),
            pipeline_layouts: Vec::new(),
            query_sets: Vec::new(),
        }
    }

    /// Move all of `other`'s resources into `self`.
    ///
    /// Layouts are deliberately *not* merged; the asserts enforce the
    /// invariant that merged-in sets never carry any.
    fn extend(&mut self, other: Self) {
        self.buffers.extend(other.buffers);
        self.textures.extend(other.textures);
        self.texture_views.extend(other.texture_views);
        self.samplers.extend(other.samplers);
        self.bind_groups.extend(other.bind_groups);
        self.compute_pipes.extend(other.compute_pipes);
        self.render_pipes.extend(other.render_pipes);
        self.query_sets.extend(other.query_sets);
        assert!(other.bind_group_layouts.is_empty());
        assert!(other.pipeline_layouts.is_empty());
    }

    /// Destroy every held raw resource through the hal device, draining all
    /// lists.
    ///
    /// # Safety
    /// Callers must ensure the resources are no longer in use by the GPU
    /// (NOTE(review): presumed hal destroy contract — confirm against the
    /// hal `Device` trait docs).
    unsafe fn clean(&mut self, device: &A::Device) {
        if !self.buffers.is_empty() {
            profiling::scope!("destroy_buffers");
            for raw in self.buffers.drain(..) {
                unsafe { device.destroy_buffer(raw) };
            }
        }
        if !self.textures.is_empty() {
            profiling::scope!("destroy_textures");
            for raw in self.textures.drain(..) {
                unsafe { device.destroy_texture(raw) };
            }
        }
        if !self.texture_views.is_empty() {
            profiling::scope!("destroy_texture_views");
            for raw in self.texture_views.drain(..) {
                unsafe { device.destroy_texture_view(raw) };
            }
        }
        if !self.samplers.is_empty() {
            profiling::scope!("destroy_samplers");
            for raw in self.samplers.drain(..) {
                unsafe { device.destroy_sampler(raw) };
            }
        }
        if !self.bind_groups.is_empty() {
            profiling::scope!("destroy_bind_groups");
            for raw in self.bind_groups.drain(..) {
                unsafe { device.destroy_bind_group(raw) };
            }
        }
        if !self.compute_pipes.is_empty() {
            profiling::scope!("destroy_compute_pipelines");
            for raw in self.compute_pipes.drain(..) {
                unsafe { device.destroy_compute_pipeline(raw) };
            }
        }
        if !self.render_pipes.is_empty() {
            profiling::scope!("destroy_render_pipelines");
            for raw in self.render_pipes.drain(..) {
                unsafe { device.destroy_render_pipeline(raw) };
            }
        }
        if !self.bind_group_layouts.is_empty() {
            profiling::scope!("destroy_bind_group_layouts");
            for raw in self.bind_group_layouts.drain(..) {
                unsafe { device.destroy_bind_group_layout(raw) };
            }
        }
        if !self.pipeline_layouts.is_empty() {
            profiling::scope!("destroy_pipeline_layouts");
            for raw in self.pipeline_layouts.drain(..) {
                unsafe { device.destroy_pipeline_layout(raw) };
            }
        }
        if !self.query_sets.is_empty() {
            profiling::scope!("destroy_query_sets");
            for raw in self.query_sets.drain(..) {
                unsafe { device.destroy_query_set(raw) };
            }
        }
    }
}
|
||||
|
||||
/// Resources used by a queue submission, and work to be done once it completes.
struct ActiveSubmission<A: hal::Api> {
    /// The index of the submission we track.
    ///
    /// When `Device::fence`'s value is greater than or equal to this, our queue
    /// submission has completed.
    index: SubmissionIndex,

    /// Resources to be freed once this queue submission has completed.
    ///
    /// When the device is polled, for completed submissions,
    /// `triage_submissions` merges these into
    /// `LifetimeTracker::free_resources`. From there,
    /// `LifetimeTracker::cleanup` passes them to the hal to be freed.
    ///
    /// This includes things like temporary resources and resources that are
    /// used by submitted commands but have been dropped by the user (meaning that
    /// this submission is their last reference.)
    last_resources: NonReferencedResources<A>,

    /// Buffers to be mapped once this submission has completed.
    mapped: Vec<id::Valid<id::BufferId>>,

    /// Command encoders in flight for this submission (see `EncoderInFlight`);
    /// presumably reclaimed when the submission completes — confirm against
    /// `triage_submissions`.
    encoders: Vec<EncoderInFlight<A>>,

    /// Closures to invoke once this submission's work is done.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}
|
||||
|
||||
/// Errors that can arise while waiting for submitted GPU work to finish.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum WaitIdleError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    // The submission index handed to poll belongs to a different device.
    #[error("Tried to wait using a submission index from the wrong device. Submission index is from device {0:?}. Called poll on device {1:?}.")]
    WrongSubmissionIndex(id::QueueId, id::DeviceId),
    #[error("GPU got stuck :(")]
    StuckGpu,
}
|
||||
|
||||
/// Resource tracking for a device.
///
/// ## Host mapping buffers
///
/// A buffer cannot be mapped until all active queue submissions that use it
/// have completed. To that end:
///
/// - Each buffer's `LifeGuard::submission_index` records the index of the
///   most recent queue submission that uses that buffer.
///
/// - Calling `map_async` adds the buffer to `self.mapped`, and changes
///   `Buffer::map_state` to prevent it from being used in any new
///   submissions.
///
/// - When the device is polled, the following `LifetimeTracker` methods decide
///   what should happen next:
///
///   1) `triage_mapped` drains `self.mapped`, checking the submission index
///      of each buffer against the queue submissions that have finished
///      execution. Buffers used by submissions still in flight go in
///      `self.active[index].mapped`, and the rest go into
///      `self.ready_to_map`.
///
///   2) `triage_submissions` moves entries in `self.active[i]` for completed
///      submissions to `self.ready_to_map`. At this point, both
///      `self.active` and `self.ready_to_map` are up to date with the given
///      submission index.
///
///   3) `handle_mapping` drains `self.ready_to_map` and actually maps the
///      buffers, collecting a list of notification closures to call. But any
///      buffers that were dropped by the user get moved to
///      `self.free_resources`.
///
///   4) `cleanup` frees everything in `free_resources`.
///
/// Only `self.mapped` holds a `RefCount` for the buffer; it is dropped by
/// `triage_mapped`.
pub(super) struct LifetimeTracker<A: hal::Api> {
    /// Resources that the user has requested be mapped, but which are used by
    /// queue submissions still in flight.
    mapped: Vec<Stored<id::BufferId>>,

    /// Buffers can be used in a submission that is yet to be made, by the
    /// means of `write_buffer()`, so we have a special place for them.
    pub future_suspected_buffers: Vec<Stored<id::BufferId>>,

    /// Textures can be used in the upcoming submission by `write_texture`.
    pub future_suspected_textures: Vec<Stored<id::TextureId>>,

    /// Resources whose user handle has died (i.e. drop/destroy has been called)
    /// and will likely be ready for destruction soon.
    pub suspected_resources: SuspectedResources,

    /// Resources used by queue submissions still in flight. One entry per
    /// submission, with older submissions appearing before younger.
    ///
    /// Entries are added by `track_submission` and drained by
    /// `LifetimeTracker::triage_submissions`. Lots of methods contribute data
    /// to particular entries.
    active: Vec<ActiveSubmission<A>>,

    /// Raw backend resources that are neither referenced nor used.
    ///
    /// These are freed by `LifeTracker::cleanup`, which is called from periodic
    /// maintenance functions like `Global::device_poll`, and when a device is
    /// destroyed.
    free_resources: NonReferencedResources<A>,

    /// Buffers the user has asked us to map, and which are not used by any
    /// queue submission still in flight.
    ready_to_map: Vec<id::Valid<id::BufferId>>,
}
|
||||
|
||||
impl<A: hal::Api> LifetimeTracker<A> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
mapped: Vec::new(),
|
||||
future_suspected_buffers: Vec::new(),
|
||||
future_suspected_textures: Vec::new(),
|
||||
suspected_resources: SuspectedResources::default(),
|
||||
active: Vec::new(),
|
||||
free_resources: NonReferencedResources::new(),
|
||||
ready_to_map: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return true if there are no queue submissions still in flight.
|
||||
pub fn queue_empty(&self) -> bool {
|
||||
self.active.is_empty()
|
||||
}
|
||||
|
||||
/// Start tracking resources associated with a new queue submission.
|
||||
pub fn track_submission(
|
||||
&mut self,
|
||||
index: SubmissionIndex,
|
||||
temp_resources: impl Iterator<Item = TempResource<A>>,
|
||||
encoders: Vec<EncoderInFlight<A>>,
|
||||
) {
|
||||
let mut last_resources = NonReferencedResources::new();
|
||||
for res in temp_resources {
|
||||
match res {
|
||||
TempResource::Buffer(raw) => last_resources.buffers.push(raw),
|
||||
TempResource::Texture(raw, views) => {
|
||||
last_resources.textures.push(raw);
|
||||
last_resources.texture_views.extend(views);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.active.push(ActiveSubmission {
|
||||
index,
|
||||
last_resources,
|
||||
mapped: Vec::new(),
|
||||
encoders,
|
||||
work_done_closures: SmallVec::new(),
|
||||
});
|
||||
}
|
||||
|
||||
pub fn post_submit(&mut self) {
|
||||
self.suspected_resources.buffers.extend(
|
||||
self.future_suspected_buffers
|
||||
.drain(..)
|
||||
.map(|stored| stored.value),
|
||||
);
|
||||
self.suspected_resources.textures.extend(
|
||||
self.future_suspected_textures
|
||||
.drain(..)
|
||||
.map(|stored| stored.value),
|
||||
);
|
||||
}
|
||||
|
||||
pub(crate) fn map(&mut self, value: id::Valid<id::BufferId>, ref_count: RefCount) {
|
||||
self.mapped.push(Stored { value, ref_count });
|
||||
}
|
||||
|
||||
/// Sort out the consequences of completed submissions.
|
||||
///
|
||||
/// Assume that all submissions up through `last_done` have completed.
|
||||
///
|
||||
/// - Buffers used by those submissions are now ready to map, if
|
||||
/// requested. Add any buffers in the submission's [`mapped`] list to
|
||||
/// [`self.ready_to_map`], where [`LifetimeTracker::handle_mapping`] will find
|
||||
/// them.
|
||||
///
|
||||
/// - Resources whose final use was in those submissions are now ready to
|
||||
/// free. Add any resources in the submission's [`last_resources`] table
|
||||
/// to [`self.free_resources`], where [`LifetimeTracker::cleanup`] will find
|
||||
/// them.
|
||||
///
|
||||
/// Return a list of [`SubmittedWorkDoneClosure`]s to run.
|
||||
///
|
||||
/// [`mapped`]: ActiveSubmission::mapped
|
||||
/// [`self.ready_to_map`]: LifetimeTracker::ready_to_map
|
||||
/// [`last_resources`]: ActiveSubmission::last_resources
|
||||
/// [`self.free_resources`]: LifetimeTracker::free_resources
|
||||
/// [`SubmittedWorkDoneClosure`]: crate::device::queue::SubmittedWorkDoneClosure
|
||||
#[must_use]
|
||||
pub fn triage_submissions(
|
||||
&mut self,
|
||||
last_done: SubmissionIndex,
|
||||
command_allocator: &Mutex<super::CommandAllocator<A>>,
|
||||
) -> SmallVec<[SubmittedWorkDoneClosure; 1]> {
|
||||
profiling::scope!("triage_submissions");
|
||||
|
||||
//TODO: enable when `is_sorted_by_key` is stable
|
||||
//debug_assert!(self.active.is_sorted_by_key(|a| a.index));
|
||||
let done_count = self
|
||||
.active
|
||||
.iter()
|
||||
.position(|a| a.index > last_done)
|
||||
.unwrap_or(self.active.len());
|
||||
|
||||
let mut work_done_closures = SmallVec::new();
|
||||
for a in self.active.drain(..done_count) {
|
||||
log::trace!("Active submission {} is done", a.index);
|
||||
self.free_resources.extend(a.last_resources);
|
||||
self.ready_to_map.extend(a.mapped);
|
||||
for encoder in a.encoders {
|
||||
let raw = unsafe { encoder.land() };
|
||||
command_allocator.lock().release_encoder(raw);
|
||||
}
|
||||
work_done_closures.extend(a.work_done_closures);
|
||||
}
|
||||
work_done_closures
|
||||
}
|
||||
|
||||
pub fn cleanup(&mut self, device: &A::Device) {
|
||||
profiling::scope!("LifetimeTracker::cleanup");
|
||||
unsafe {
|
||||
self.free_resources.clean(device);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn schedule_resource_destruction(
|
||||
&mut self,
|
||||
temp_resource: TempResource<A>,
|
||||
last_submit_index: SubmissionIndex,
|
||||
) {
|
||||
let resources = self
|
||||
.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == last_submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources);
|
||||
match temp_resource {
|
||||
TempResource::Buffer(raw) => resources.buffers.push(raw),
|
||||
TempResource::Texture(raw, views) => {
|
||||
resources.texture_views.extend(views);
|
||||
resources.textures.push(raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_work_done_closure(
|
||||
&mut self,
|
||||
closure: SubmittedWorkDoneClosure,
|
||||
) -> Option<SubmittedWorkDoneClosure> {
|
||||
match self.active.last_mut() {
|
||||
Some(active) => {
|
||||
active.work_done_closures.push(closure);
|
||||
None
|
||||
}
|
||||
// Note: we can't immediately invoke the closure, since it assumes
|
||||
// nothing is currently locked in the hubs.
|
||||
None => Some(closure),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A: HalApi> LifetimeTracker<A> {
|
||||
/// Identify resources to free, according to `trackers` and `self.suspected_resources`.
|
||||
///
|
||||
/// Given `trackers`, the [`Tracker`] belonging to same [`Device`] as
|
||||
/// `self`, and `hub`, the [`Hub`] to which that `Device` belongs:
|
||||
///
|
||||
/// Remove from `trackers` each resource mentioned in
|
||||
/// [`self.suspected_resources`]. If `trackers` held the final reference to
|
||||
/// that resource, add it to the appropriate free list, to be destroyed by
|
||||
/// the hal:
|
||||
///
|
||||
/// - Add resources used by queue submissions still in flight to the
|
||||
/// [`last_resources`] table of the last such submission's entry in
|
||||
/// [`self.active`]. When that submission has finished execution. the
|
||||
/// [`triage_submissions`] method will move them to
|
||||
/// [`self.free_resources`].
|
||||
///
|
||||
/// - Add resources that can be freed right now to [`self.free_resources`]
|
||||
/// directly. [`LifetimeTracker::cleanup`] will take care of them as
|
||||
/// part of this poll.
|
||||
///
|
||||
/// ## Entrained resources
|
||||
///
|
||||
/// This function finds resources that are used only by other resources
|
||||
/// ready to be freed, and adds those to the free lists as well. For
|
||||
/// example, if there's some texture `T` used only by some texture view
|
||||
/// `TV`, then if `TV` can be freed, `T` gets added to the free lists too.
|
||||
///
|
||||
/// Since `wgpu-core` resource ownership patterns are acyclic, we can visit
|
||||
/// each type that can be owned after all types that could possibly own
|
||||
/// it. This way, we can detect all free-able objects in a single pass,
|
||||
/// simply by starting with types that are roots of the ownership DAG (like
|
||||
/// render bundles) and working our way towards leaf types (like buffers).
|
||||
///
|
||||
/// [`Device`]: super::Device
|
||||
/// [`self.suspected_resources`]: LifetimeTracker::suspected_resources
|
||||
/// [`last_resources`]: ActiveSubmission::last_resources
|
||||
/// [`self.active`]: LifetimeTracker::active
|
||||
/// [`triage_submissions`]: LifetimeTracker::triage_submissions
|
||||
/// [`self.free_resources`]: LifetimeTracker::free_resources
|
||||
pub(super) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
|
||||
&mut self,
|
||||
hub: &Hub<A, G>,
|
||||
trackers: &Mutex<Tracker<A>>,
|
||||
#[cfg(feature = "trace")] trace: Option<&Mutex<trace::Trace>>,
|
||||
token: &mut Token<super::Device<A>>,
|
||||
) {
|
||||
profiling::scope!("triage_suspected");
|
||||
|
||||
if !self.suspected_resources.render_bundles.is_empty() {
|
||||
let (mut guard, _) = hub.render_bundles.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
while let Some(id) = self.suspected_resources.render_bundles.pop() {
|
||||
if trackers.bundles.remove_abandoned(id) {
|
||||
log::debug!("Bundle {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyRenderBundle(id.0));
|
||||
}
|
||||
|
||||
if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
|
||||
self.suspected_resources.add_render_bundle_scope(&res.used);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.bind_groups.is_empty() {
|
||||
let (mut guard, _) = hub.bind_groups.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
while let Some(id) = self.suspected_resources.bind_groups.pop() {
|
||||
if trackers.bind_groups.remove_abandoned(id) {
|
||||
log::debug!("Bind group {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyBindGroup(id.0));
|
||||
}
|
||||
|
||||
if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
|
||||
self.suspected_resources.add_bind_group_states(&res.used);
|
||||
|
||||
self.suspected_resources
|
||||
.bind_group_layouts
|
||||
.push(res.layout_id);
|
||||
|
||||
let submit_index = res.life_guard.life_count();
|
||||
self.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
|
||||
.bind_groups
|
||||
.push(res.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.texture_views.is_empty() {
|
||||
let (mut guard, _) = hub.texture_views.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
let mut list = mem::take(&mut self.suspected_resources.texture_views);
|
||||
for id in list.drain(..) {
|
||||
if trackers.views.remove_abandoned(id) {
|
||||
log::debug!("Texture view {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyTextureView(id.0));
|
||||
}
|
||||
|
||||
if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
|
||||
self.suspected_resources.textures.push(res.parent_id.value);
|
||||
let submit_index = res.life_guard.life_count();
|
||||
self.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
|
||||
.texture_views
|
||||
.push(res.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.suspected_resources.texture_views = list;
|
||||
}
|
||||
|
||||
if !self.suspected_resources.textures.is_empty() {
|
||||
let (mut guard, _) = hub.textures.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
for id in self.suspected_resources.textures.drain(..) {
|
||||
if trackers.textures.remove_abandoned(id) {
|
||||
log::debug!("Texture {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyTexture(id.0));
|
||||
}
|
||||
|
||||
if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) {
|
||||
let submit_index = res.life_guard.life_count();
|
||||
let raw = match res.inner {
|
||||
resource::TextureInner::Native { raw: Some(raw) } => raw,
|
||||
_ => continue,
|
||||
};
|
||||
let non_referenced_resources = self
|
||||
.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources);
|
||||
|
||||
non_referenced_resources.textures.push(raw);
|
||||
if let resource::TextureClearMode::RenderPass { clear_views, .. } =
|
||||
res.clear_mode
|
||||
{
|
||||
non_referenced_resources
|
||||
.texture_views
|
||||
.extend(clear_views.into_iter());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.samplers.is_empty() {
|
||||
let (mut guard, _) = hub.samplers.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
for id in self.suspected_resources.samplers.drain(..) {
|
||||
if trackers.samplers.remove_abandoned(id) {
|
||||
log::debug!("Sampler {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroySampler(id.0));
|
||||
}
|
||||
|
||||
if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) {
|
||||
let submit_index = res.life_guard.life_count();
|
||||
self.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
|
||||
.samplers
|
||||
.push(res.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.buffers.is_empty() {
|
||||
let (mut guard, _) = hub.buffers.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
for id in self.suspected_resources.buffers.drain(..) {
|
||||
if trackers.buffers.remove_abandoned(id) {
|
||||
log::debug!("Buffer {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyBuffer(id.0));
|
||||
}
|
||||
|
||||
if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
|
||||
let submit_index = res.life_guard.life_count();
|
||||
if let resource::BufferMapState::Init { stage_buffer, .. } = res.map_state {
|
||||
self.free_resources.buffers.push(stage_buffer);
|
||||
}
|
||||
self.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
|
||||
.buffers
|
||||
.extend(res.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.compute_pipelines.is_empty() {
|
||||
let (mut guard, _) = hub.compute_pipelines.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
for id in self.suspected_resources.compute_pipelines.drain(..) {
|
||||
if trackers.compute_pipelines.remove_abandoned(id) {
|
||||
log::debug!("Compute pipeline {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyComputePipeline(id.0));
|
||||
}
|
||||
|
||||
if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) {
|
||||
let submit_index = res.life_guard.life_count();
|
||||
self.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
|
||||
.compute_pipes
|
||||
.push(res.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.render_pipelines.is_empty() {
|
||||
let (mut guard, _) = hub.render_pipelines.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
for id in self.suspected_resources.render_pipelines.drain(..) {
|
||||
if trackers.render_pipelines.remove_abandoned(id) {
|
||||
log::debug!("Render pipeline {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyRenderPipeline(id.0));
|
||||
}
|
||||
|
||||
if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
|
||||
let submit_index = res.life_guard.life_count();
|
||||
self.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
|
||||
.render_pipes
|
||||
.push(res.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.pipeline_layouts.is_empty() {
|
||||
let (mut guard, _) = hub.pipeline_layouts.write(token);
|
||||
|
||||
for Stored {
|
||||
value: id,
|
||||
ref_count,
|
||||
} in self.suspected_resources.pipeline_layouts.drain(..)
|
||||
{
|
||||
//Note: this has to happen after all the suspected pipelines are destroyed
|
||||
if ref_count.load() == 1 {
|
||||
log::debug!("Pipeline layout {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyPipelineLayout(id.0));
|
||||
}
|
||||
|
||||
if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
|
||||
self.suspected_resources
|
||||
.bind_group_layouts
|
||||
.extend_from_slice(&lay.bind_group_layout_ids);
|
||||
self.free_resources.pipeline_layouts.push(lay.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.bind_group_layouts.is_empty() {
|
||||
let (mut guard, _) = hub.bind_group_layouts.write(token);
|
||||
|
||||
for id in self.suspected_resources.bind_group_layouts.drain(..) {
|
||||
//Note: this has to happen after all the suspected pipelines are destroyed
|
||||
//Note: nothing else can bump the refcount since the guard is locked exclusively
|
||||
//Note: same BGL can appear multiple times in the list, but only the last
|
||||
// encounter could drop the refcount to 0.
|
||||
if guard[id].multi_ref_count.dec_and_check_empty() {
|
||||
log::debug!("Bind group layout {:?} will be destroyed", id);
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(t) = trace {
|
||||
t.lock().add(trace::Action::DestroyBindGroupLayout(id.0));
|
||||
}
|
||||
if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) {
|
||||
self.free_resources.bind_group_layouts.push(lay.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !self.suspected_resources.query_sets.is_empty() {
|
||||
let (mut guard, _) = hub.query_sets.write(token);
|
||||
let mut trackers = trackers.lock();
|
||||
|
||||
for id in self.suspected_resources.query_sets.drain(..) {
|
||||
if trackers.query_sets.remove_abandoned(id) {
|
||||
log::debug!("Query set {:?} will be destroyed", id);
|
||||
// #[cfg(feature = "trace")]
|
||||
// trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
|
||||
if let Some(res) = hub.query_sets.unregister_locked(id.0, &mut *guard) {
|
||||
let submit_index = res.life_guard.life_count();
|
||||
self.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
|
||||
.query_sets
|
||||
.push(res.raw);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Determine which buffers are ready to map, and which must wait for the
|
||||
/// GPU.
|
||||
///
|
||||
/// See the documentation for [`LifetimeTracker`] for details.
|
||||
pub(super) fn triage_mapped<G: GlobalIdentityHandlerFactory>(
|
||||
&mut self,
|
||||
hub: &Hub<A, G>,
|
||||
token: &mut Token<super::Device<A>>,
|
||||
) {
|
||||
if self.mapped.is_empty() {
|
||||
return;
|
||||
}
|
||||
let (buffer_guard, _) = hub.buffers.read(token);
|
||||
|
||||
for stored in self.mapped.drain(..) {
|
||||
let resource_id = stored.value;
|
||||
let buf = &buffer_guard[resource_id];
|
||||
|
||||
let submit_index = buf.life_guard.life_count();
|
||||
log::trace!(
|
||||
"Mapping of {:?} at submission {:?} gets assigned to active {:?}",
|
||||
resource_id,
|
||||
submit_index,
|
||||
self.active.iter().position(|a| a.index == submit_index)
|
||||
);
|
||||
|
||||
self.active
|
||||
.iter_mut()
|
||||
.find(|a| a.index == submit_index)
|
||||
.map_or(&mut self.ready_to_map, |a| &mut a.mapped)
|
||||
.push(resource_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Map the buffers in `self.ready_to_map`.
|
||||
///
|
||||
/// Return a list of mapping notifications to send.
|
||||
///
|
||||
/// See the documentation for [`LifetimeTracker`] for details.
|
||||
#[must_use]
|
||||
pub(super) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
|
||||
&mut self,
|
||||
hub: &Hub<A, G>,
|
||||
raw: &A::Device,
|
||||
trackers: &Mutex<Tracker<A>>,
|
||||
token: &mut Token<super::Device<A>>,
|
||||
) -> Vec<super::BufferMapPendingClosure> {
|
||||
if self.ready_to_map.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
let (mut buffer_guard, _) = hub.buffers.write(token);
|
||||
let mut pending_callbacks: Vec<super::BufferMapPendingClosure> =
|
||||
Vec::with_capacity(self.ready_to_map.len());
|
||||
let mut trackers = trackers.lock();
|
||||
for buffer_id in self.ready_to_map.drain(..) {
|
||||
let buffer = &mut buffer_guard[buffer_id];
|
||||
if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id)
|
||||
{
|
||||
buffer.map_state = resource::BufferMapState::Idle;
|
||||
log::debug!("Mapping request is dropped because the buffer is destroyed.");
|
||||
if let Some(buf) = hub
|
||||
.buffers
|
||||
.unregister_locked(buffer_id.0, &mut *buffer_guard)
|
||||
{
|
||||
self.free_resources.buffers.extend(buf.raw);
|
||||
}
|
||||
} else {
|
||||
let mapping = match std::mem::replace(
|
||||
&mut buffer.map_state,
|
||||
resource::BufferMapState::Idle,
|
||||
) {
|
||||
resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
|
||||
// Mapping cancelled
|
||||
resource::BufferMapState::Idle => continue,
|
||||
// Mapping queued at least twice by map -> unmap -> map
|
||||
// and was already successfully mapped below
|
||||
active @ resource::BufferMapState::Active { .. } => {
|
||||
buffer.map_state = active;
|
||||
continue;
|
||||
}
|
||||
_ => panic!("No pending mapping."),
|
||||
};
|
||||
let status = if mapping.range.start != mapping.range.end {
|
||||
log::debug!("Buffer {:?} map state -> Active", buffer_id);
|
||||
let host = mapping.op.host;
|
||||
let size = mapping.range.end - mapping.range.start;
|
||||
match super::map_buffer(raw, buffer, mapping.range.start, size, host) {
|
||||
Ok(ptr) => {
|
||||
buffer.map_state = resource::BufferMapState::Active {
|
||||
ptr,
|
||||
range: mapping.range.start..mapping.range.start + size,
|
||||
host,
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
log::error!("Mapping failed {:?}", e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
buffer.map_state = resource::BufferMapState::Active {
|
||||
ptr: std::ptr::NonNull::dangling(),
|
||||
range: mapping.range,
|
||||
host: mapping.op.host,
|
||||
};
|
||||
Ok(())
|
||||
};
|
||||
pending_callbacks.push((mapping.op, status));
|
||||
}
|
||||
}
|
||||
pending_callbacks
|
||||
}
|
||||
}
|
||||
344
third-party/vendor/wgpu-core/src/device/mod.rs
vendored
Normal file
344
third-party/vendor/wgpu-core/src/device/mod.rs
vendored
Normal file
|
|
@ -0,0 +1,344 @@
|
|||
use crate::{
|
||||
binding_model,
|
||||
hal_api::HalApi,
|
||||
hub::Hub,
|
||||
id,
|
||||
identity::{GlobalIdentityHandlerFactory, Input},
|
||||
resource::{Buffer, BufferAccessResult},
|
||||
resource::{BufferAccessError, BufferMapOperation},
|
||||
Label, DOWNLEVEL_ERROR_MESSAGE,
|
||||
};
|
||||
|
||||
use arrayvec::ArrayVec;
|
||||
use hal::Device as _;
|
||||
use smallvec::SmallVec;
|
||||
use thiserror::Error;
|
||||
use wgt::{BufferAddress, TextureFormat};
|
||||
|
||||
use std::{iter, num::NonZeroU32, ptr};
|
||||
|
||||
pub mod global;
|
||||
mod life;
|
||||
pub mod queue;
|
||||
pub mod resource;
|
||||
#[cfg(any(feature = "trace", feature = "replay"))]
|
||||
pub mod trace;
|
||||
pub use {life::WaitIdleError, resource::Device};
|
||||
|
||||
pub const SHADER_STAGE_COUNT: usize = 3;
|
||||
// Should be large enough for the largest possible texture row. This
|
||||
// value is enough for a 16k texture with float4 format.
|
||||
pub(crate) const ZERO_BUFFER_SIZE: BufferAddress = 512 << 10;
|
||||
|
||||
const CLEANUP_WAIT_MS: u32 = 5000;
|
||||
|
||||
const IMPLICIT_FAILURE: &str = "failed implicit";
|
||||
const EP_FAILURE: &str = "EP is invalid";
|
||||
|
||||
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
|
||||
pub enum HostMap {
|
||||
Read,
|
||||
Write,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Hash, PartialEq)]
|
||||
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
|
||||
pub(crate) struct AttachmentData<T> {
|
||||
pub colors: ArrayVec<Option<T>, { hal::MAX_COLOR_ATTACHMENTS }>,
|
||||
pub resolves: ArrayVec<T, { hal::MAX_COLOR_ATTACHMENTS }>,
|
||||
pub depth_stencil: Option<T>,
|
||||
}
|
||||
impl<T: PartialEq> Eq for AttachmentData<T> {}
|
||||
impl<T> AttachmentData<T> {
|
||||
pub(crate) fn map<U, F: Fn(&T) -> U>(&self, fun: F) -> AttachmentData<U> {
|
||||
AttachmentData {
|
||||
colors: self.colors.iter().map(|c| c.as_ref().map(&fun)).collect(),
|
||||
resolves: self.resolves.iter().map(&fun).collect(),
|
||||
depth_stencil: self.depth_stencil.as_ref().map(&fun),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub enum RenderPassCompatibilityCheckType {
|
||||
RenderPipeline,
|
||||
RenderBundle,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Hash, PartialEq)]
|
||||
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
|
||||
pub(crate) struct RenderPassContext {
|
||||
pub attachments: AttachmentData<TextureFormat>,
|
||||
pub sample_count: u32,
|
||||
pub multiview: Option<NonZeroU32>,
|
||||
}
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum RenderPassCompatibilityError {
|
||||
#[error(
|
||||
"Incompatible color attachments at indices {indices:?}: the RenderPass uses textures with formats {expected:?} but the {ty:?} uses attachments with formats {actual:?}",
|
||||
)]
|
||||
IncompatibleColorAttachment {
|
||||
indices: Vec<usize>,
|
||||
expected: Vec<Option<TextureFormat>>,
|
||||
actual: Vec<Option<TextureFormat>>,
|
||||
ty: RenderPassCompatibilityCheckType,
|
||||
},
|
||||
#[error(
|
||||
"Incompatible depth-stencil attachment format: the RenderPass uses a texture with format {expected:?} but the {ty:?} uses an attachment with format {actual:?}",
|
||||
)]
|
||||
IncompatibleDepthStencilAttachment {
|
||||
expected: Option<TextureFormat>,
|
||||
actual: Option<TextureFormat>,
|
||||
ty: RenderPassCompatibilityCheckType,
|
||||
},
|
||||
#[error(
|
||||
"Incompatible sample count: the RenderPass uses textures with sample count {expected:?} but the {ty:?} uses attachments with format {actual:?}",
|
||||
)]
|
||||
IncompatibleSampleCount {
|
||||
expected: u32,
|
||||
actual: u32,
|
||||
ty: RenderPassCompatibilityCheckType,
|
||||
},
|
||||
#[error("Incompatible multiview setting: the RenderPass uses setting {expected:?} but the {ty:?} uses setting {actual:?}")]
|
||||
IncompatibleMultiview {
|
||||
expected: Option<NonZeroU32>,
|
||||
actual: Option<NonZeroU32>,
|
||||
ty: RenderPassCompatibilityCheckType,
|
||||
},
|
||||
}
|
||||
|
||||
impl RenderPassContext {
|
||||
// Assumes the renderpass only contains one subpass
|
||||
pub(crate) fn check_compatible(
|
||||
&self,
|
||||
other: &Self,
|
||||
ty: RenderPassCompatibilityCheckType,
|
||||
) -> Result<(), RenderPassCompatibilityError> {
|
||||
if self.attachments.colors != other.attachments.colors {
|
||||
let indices = self
|
||||
.attachments
|
||||
.colors
|
||||
.iter()
|
||||
.zip(&other.attachments.colors)
|
||||
.enumerate()
|
||||
.filter_map(|(idx, (left, right))| (left != right).then_some(idx))
|
||||
.collect();
|
||||
return Err(RenderPassCompatibilityError::IncompatibleColorAttachment {
|
||||
indices,
|
||||
expected: self.attachments.colors.iter().cloned().collect(),
|
||||
actual: other.attachments.colors.iter().cloned().collect(),
|
||||
ty,
|
||||
});
|
||||
}
|
||||
if self.attachments.depth_stencil != other.attachments.depth_stencil {
|
||||
return Err(
|
||||
RenderPassCompatibilityError::IncompatibleDepthStencilAttachment {
|
||||
expected: self.attachments.depth_stencil,
|
||||
actual: other.attachments.depth_stencil,
|
||||
ty,
|
||||
},
|
||||
);
|
||||
}
|
||||
if self.sample_count != other.sample_count {
|
||||
return Err(RenderPassCompatibilityError::IncompatibleSampleCount {
|
||||
expected: self.sample_count,
|
||||
actual: other.sample_count,
|
||||
ty,
|
||||
});
|
||||
}
|
||||
if self.multiview != other.multiview {
|
||||
return Err(RenderPassCompatibilityError::IncompatibleMultiview {
|
||||
expected: self.multiview,
|
||||
actual: other.multiview,
|
||||
ty,
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub type BufferMapPendingClosure = (BufferMapOperation, BufferAccessResult);
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct UserClosures {
|
||||
pub mappings: Vec<BufferMapPendingClosure>,
|
||||
pub submissions: SmallVec<[queue::SubmittedWorkDoneClosure; 1]>,
|
||||
}
|
||||
|
||||
impl UserClosures {
|
||||
fn extend(&mut self, other: Self) {
|
||||
self.mappings.extend(other.mappings);
|
||||
self.submissions.extend(other.submissions);
|
||||
}
|
||||
|
||||
fn fire(self) {
|
||||
// Note: this logic is specifically moved out of `handle_mapping()` in order to
|
||||
// have nothing locked by the time we execute users callback code.
|
||||
for (operation, status) in self.mappings {
|
||||
operation.callback.call(status);
|
||||
}
|
||||
for closure in self.submissions {
|
||||
closure.call();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn map_buffer<A: hal::Api>(
|
||||
raw: &A::Device,
|
||||
buffer: &mut Buffer<A>,
|
||||
offset: BufferAddress,
|
||||
size: BufferAddress,
|
||||
kind: HostMap,
|
||||
) -> Result<ptr::NonNull<u8>, BufferAccessError> {
|
||||
let mapping = unsafe {
|
||||
raw.map_buffer(buffer.raw.as_ref().unwrap(), offset..offset + size)
|
||||
.map_err(DeviceError::from)?
|
||||
};
|
||||
|
||||
buffer.sync_mapped_writes = match kind {
|
||||
HostMap::Read if !mapping.is_coherent => unsafe {
|
||||
raw.invalidate_mapped_ranges(
|
||||
buffer.raw.as_ref().unwrap(),
|
||||
iter::once(offset..offset + size),
|
||||
);
|
||||
None
|
||||
},
|
||||
HostMap::Write if !mapping.is_coherent => Some(offset..offset + size),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
assert_eq!(offset % wgt::COPY_BUFFER_ALIGNMENT, 0);
|
||||
assert_eq!(size % wgt::COPY_BUFFER_ALIGNMENT, 0);
|
||||
// Zero out uninitialized parts of the mapping. (Spec dictates all resources
|
||||
// behave as if they were initialized with zero)
|
||||
//
|
||||
// If this is a read mapping, ideally we would use a `clear_buffer` command
|
||||
// before reading the data from GPU (i.e. `invalidate_range`). However, this
|
||||
// would require us to kick off and wait for a command buffer or piggy back
|
||||
// on an existing one (the later is likely the only worthwhile option). As
|
||||
// reading uninitialized memory isn't a particular important path to
|
||||
// support, we instead just initialize the memory here and make sure it is
|
||||
// GPU visible, so this happens at max only once for every buffer region.
|
||||
//
|
||||
// If this is a write mapping zeroing out the memory here is the only
|
||||
// reasonable way as all data is pushed to GPU anyways.
|
||||
|
||||
// No need to flush if it is flushed later anyways.
|
||||
let zero_init_needs_flush_now = mapping.is_coherent && buffer.sync_mapped_writes.is_none();
|
||||
let mapped = unsafe { std::slice::from_raw_parts_mut(mapping.ptr.as_ptr(), size as usize) };
|
||||
|
||||
for uninitialized in buffer.initialization_status.drain(offset..(size + offset)) {
|
||||
// The mapping's pointer is already offset, however we track the
|
||||
// uninitialized range relative to the buffer's start.
|
||||
let fill_range =
|
||||
(uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize;
|
||||
mapped[fill_range].fill(0);
|
||||
|
||||
if zero_init_needs_flush_now {
|
||||
unsafe {
|
||||
raw.flush_mapped_ranges(buffer.raw.as_ref().unwrap(), iter::once(uninitialized))
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
Ok(mapping.ptr)
|
||||
}
|
||||
|
||||
/// Pool of free hal command encoders for one device, so encoders can be
/// recycled instead of created and destroyed per command buffer.
struct CommandAllocator<A: hal::Api> {
    // Encoders that have been released and may be handed out again.
    free_encoders: Vec<A::CommandEncoder>,
}
|
||||
|
||||
impl<A: hal::Api> CommandAllocator<A> {
|
||||
fn acquire_encoder(
|
||||
&mut self,
|
||||
device: &A::Device,
|
||||
queue: &A::Queue,
|
||||
) -> Result<A::CommandEncoder, hal::DeviceError> {
|
||||
match self.free_encoders.pop() {
|
||||
Some(encoder) => Ok(encoder),
|
||||
None => unsafe {
|
||||
let hal_desc = hal::CommandEncoderDescriptor { label: None, queue };
|
||||
device.create_command_encoder(&hal_desc)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn release_encoder(&mut self, encoder: A::CommandEncoder) {
|
||||
self.free_encoders.push(encoder);
|
||||
}
|
||||
|
||||
fn dispose(self, device: &A::Device) {
|
||||
log::info!("Destroying {} command encoders", self.free_encoders.len());
|
||||
for cmd_encoder in self.free_encoders {
|
||||
unsafe {
|
||||
device.destroy_command_encoder(cmd_encoder);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error returned when an operation refers to a device id that does not
/// resolve to a live device.
#[derive(Clone, Debug, Error)]
#[error("Device is invalid")]
pub struct InvalidDevice;
|
||||
|
||||
/// Device-level failures that can surface from many device operations.
#[derive(Clone, Debug, Error)]
pub enum DeviceError {
    /// The parent device id did not resolve to a live device.
    #[error("Parent device is invalid")]
    Invalid,
    /// The device has been lost (maps from `hal::DeviceError::Lost`).
    #[error("Parent device is lost")]
    Lost,
    /// An allocation failed (maps from `hal::DeviceError::OutOfMemory`).
    #[error("Not enough memory left")]
    OutOfMemory,
}
|
||||
|
||||
impl From<hal::DeviceError> for DeviceError {
|
||||
fn from(error: hal::DeviceError) -> Self {
|
||||
match error {
|
||||
hal::DeviceError::Lost => DeviceError::Lost,
|
||||
hal::DeviceError::OutOfMemory => DeviceError::OutOfMemory,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error: the operation needs device features that were not enabled at
/// device creation. Carries the missing [`wgt::Features`] bits.
#[derive(Clone, Debug, Error)]
#[error("Features {0:?} are required but not enabled on the device")]
pub struct MissingFeatures(pub wgt::Features);
|
||||
|
||||
/// Error: the operation needs downlevel capabilities the adapter does not
/// support. Carries the missing [`wgt::DownlevelFlags`] bits.
#[derive(Clone, Debug, Error)]
#[error(
    "Downlevel flags {0:?} are required but not supported on the device.\n{}",
    DOWNLEVEL_ERROR_MESSAGE
)]
pub struct MissingDownlevelFlags(pub wgt::DownlevelFlags);
|
||||
|
||||
/// Ids of the layout objects implicitly created for a pipeline: the pipeline
/// layout plus one bind group layout per group.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ImplicitPipelineContext {
    pub root_id: id::PipelineLayoutId,
    pub group_ids: ArrayVec<id::BindGroupLayoutId, { hal::MAX_BIND_GROUPS }>,
}
|
||||
|
||||
/// Caller-supplied id inputs for implicitly created pipeline layouts;
/// turned into concrete ids by [`ImplicitPipelineIds::prepare`].
pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> {
    pub root_id: Input<G, id::PipelineLayoutId>,
    pub group_ids: &'a [Input<G, id::BindGroupLayoutId>],
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> ImplicitPipelineIds<'_, G> {
    /// Reserve the supplied id inputs in the hub's pipeline-layout and
    /// bind-group-layout registries and return the concrete ids the
    /// implicit layouts will occupy.
    fn prepare<A: HalApi>(self, hub: &Hub<A, G>) -> ImplicitPipelineContext {
        ImplicitPipelineContext {
            root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(),
            group_ids: self
                .group_ids
                .iter()
                .map(|id_in| hub.bind_group_layouts.prepare(id_in.clone()).into_id())
                .collect(),
        }
    }
}
|
||||
1452
third-party/vendor/wgpu-core/src/device/queue.rs
vendored
Normal file
1452
third-party/vendor/wgpu-core/src/device/queue.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
3160
third-party/vendor/wgpu-core/src/device/resource.rs
vendored
Normal file
3160
third-party/vendor/wgpu-core/src/device/resource.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
234
third-party/vendor/wgpu-core/src/device/trace.rs
vendored
Normal file
234
third-party/vendor/wgpu-core/src/device/trace.rs
vendored
Normal file
|
|
@ -0,0 +1,234 @@
|
|||
use crate::id;
|
||||
use std::ops::Range;
|
||||
#[cfg(feature = "trace")]
|
||||
use std::{borrow::Cow, io::Write as _};
|
||||
|
||||
//TODO: consider a readable Id that doesn't include the backend

// Name of a binary payload file written next to the trace,
// e.g. "data1.spv" (see `Trace::make_binary`).
type FileName = String;

/// File name of the main RON action list inside the trace directory.
pub const FILE_NAME: &str = "trace.ron";
|
||||
|
||||
/// Build a render-bundle-encoder descriptor that matches an existing render
/// pass context, for recording the bundle into a trace.
#[cfg(feature = "trace")]
pub(crate) fn new_render_bundle_encoder_descriptor<'a>(
    label: crate::Label<'a>,
    context: &'a super::RenderPassContext,
    depth_read_only: bool,
    stencil_read_only: bool,
) -> crate::command::RenderBundleEncoderDescriptor<'a> {
    crate::command::RenderBundleEncoderDescriptor {
        label,
        color_formats: Cow::Borrowed(&context.attachments.colors),
        // Only emit a depth/stencil entry when the pass actually has one.
        depth_stencil: context.attachments.depth_stencil.map(|format| {
            wgt::RenderBundleDepthStencil {
                format,
                depth_read_only,
                stencil_read_only,
            }
        }),
        sample_count: context.sample_count,
        multiview: context.multiview,
    }
}
|
||||
|
||||
/// One recorded device-level action in a trace, serialized to RON and
/// replayed in order.
///
/// Variants mirror device/resource operations; large binary payloads
/// (shader sources, buffer/texture contents) are stored out-of-line and
/// referenced by `FileName`.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum Action<'a> {
    Init {
        desc: crate::device::DeviceDescriptor<'a>,
        backend: wgt::Backend,
    },
    ConfigureSurface(
        id::SurfaceId,
        wgt::SurfaceConfiguration<Vec<wgt::TextureFormat>>,
    ),
    CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>),
    FreeBuffer(id::BufferId),
    DestroyBuffer(id::BufferId),
    CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>),
    FreeTexture(id::TextureId),
    DestroyTexture(id::TextureId),
    CreateTextureView {
        id: id::TextureViewId,
        parent_id: id::TextureId,
        desc: crate::resource::TextureViewDescriptor<'a>,
    },
    DestroyTextureView(id::TextureViewId),
    CreateSampler(id::SamplerId, crate::resource::SamplerDescriptor<'a>),
    DestroySampler(id::SamplerId),
    GetSurfaceTexture {
        id: id::TextureId,
        parent_id: id::SurfaceId,
    },
    Present(id::SurfaceId),
    DiscardSurfaceTexture(id::SurfaceId),
    CreateBindGroupLayout(
        id::BindGroupLayoutId,
        crate::binding_model::BindGroupLayoutDescriptor<'a>,
    ),
    DestroyBindGroupLayout(id::BindGroupLayoutId),
    CreatePipelineLayout(
        id::PipelineLayoutId,
        crate::binding_model::PipelineLayoutDescriptor<'a>,
    ),
    DestroyPipelineLayout(id::PipelineLayoutId),
    CreateBindGroup(
        id::BindGroupId,
        crate::binding_model::BindGroupDescriptor<'a>,
    ),
    DestroyBindGroup(id::BindGroupId),
    CreateShaderModule {
        id: id::ShaderModuleId,
        desc: crate::pipeline::ShaderModuleDescriptor<'a>,
        // Out-of-line file holding the shader source.
        data: FileName,
    },
    DestroyShaderModule(id::ShaderModuleId),
    CreateComputePipeline {
        id: id::ComputePipelineId,
        desc: crate::pipeline::ComputePipelineDescriptor<'a>,
        // `serde(default)`: older traces without this field replay as None.
        #[cfg_attr(feature = "replay", serde(default))]
        implicit_context: Option<super::ImplicitPipelineContext>,
    },
    DestroyComputePipeline(id::ComputePipelineId),
    CreateRenderPipeline {
        id: id::RenderPipelineId,
        desc: crate::pipeline::RenderPipelineDescriptor<'a>,
        // `serde(default)`: older traces without this field replay as None.
        #[cfg_attr(feature = "replay", serde(default))]
        implicit_context: Option<super::ImplicitPipelineContext>,
    },
    DestroyRenderPipeline(id::RenderPipelineId),
    CreateRenderBundle {
        id: id::RenderBundleId,
        desc: crate::command::RenderBundleEncoderDescriptor<'a>,
        base: crate::command::BasePass<crate::command::RenderCommand>,
    },
    DestroyRenderBundle(id::RenderBundleId),
    CreateQuerySet {
        id: id::QuerySetId,
        desc: crate::resource::QuerySetDescriptor<'a>,
    },
    DestroyQuerySet(id::QuerySetId),
    WriteBuffer {
        id: id::BufferId,
        // Out-of-line file holding the written bytes.
        data: FileName,
        range: Range<wgt::BufferAddress>,
        queued: bool,
    },
    WriteTexture {
        to: crate::command::ImageCopyTexture,
        // Out-of-line file holding the written bytes.
        data: FileName,
        layout: wgt::ImageDataLayout,
        size: wgt::Extent3d,
    },
    Submit(crate::SubmissionIndex, Vec<Command>),
}
|
||||
|
||||
/// One command recorded inside a submitted command buffer
/// (carried by [`Action::Submit`]).
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum Command {
    CopyBufferToBuffer {
        src: id::BufferId,
        src_offset: wgt::BufferAddress,
        dst: id::BufferId,
        dst_offset: wgt::BufferAddress,
        size: wgt::BufferAddress,
    },
    CopyBufferToTexture {
        src: crate::command::ImageCopyBuffer,
        dst: crate::command::ImageCopyTexture,
        size: wgt::Extent3d,
    },
    CopyTextureToBuffer {
        src: crate::command::ImageCopyTexture,
        dst: crate::command::ImageCopyBuffer,
        size: wgt::Extent3d,
    },
    CopyTextureToTexture {
        src: crate::command::ImageCopyTexture,
        dst: crate::command::ImageCopyTexture,
        size: wgt::Extent3d,
    },
    ClearBuffer {
        dst: id::BufferId,
        offset: wgt::BufferAddress,
        // None clears from `offset` to the end of the buffer.
        size: Option<wgt::BufferSize>,
    },
    ClearTexture {
        dst: id::TextureId,
        subresource_range: wgt::ImageSubresourceRange,
    },
    WriteTimestamp {
        query_set_id: id::QuerySetId,
        query_index: u32,
    },
    ResolveQuerySet {
        query_set_id: id::QuerySetId,
        start_query: u32,
        query_count: u32,
        destination: id::BufferId,
        destination_offset: wgt::BufferAddress,
    },
    PushDebugGroup(String),
    PopDebugGroup,
    InsertDebugMarker(String),
    RunComputePass {
        base: crate::command::BasePass<crate::command::ComputeCommand>,
    },
    RunRenderPass {
        base: crate::command::BasePass<crate::command::RenderCommand>,
        target_colors: Vec<Option<crate::command::RenderPassColorAttachment>>,
        target_depth_stencil: Option<crate::command::RenderPassDepthStencilAttachment>,
    },
}
|
||||
|
||||
/// Writer that records device actions as a RON list in `trace.ron`, with
/// binary payloads stored as sibling `data<N>.<kind>` files.
#[cfg(feature = "trace")]
#[derive(Debug)]
pub struct Trace {
    // Directory that holds trace.ron and the binary payload files.
    path: std::path::PathBuf,
    // Open handle to trace.ron; "[\n" is written by `new`, "]" on drop.
    file: std::fs::File,
    // Pretty-printing configuration reused for every serialized action.
    config: ron::ser::PrettyConfig,
    // Monotonic counter used to name the binary payload files.
    binary_id: usize,
}
|
||||
|
||||
#[cfg(feature = "trace")]
impl Trace {
    /// Create a trace writer rooted at directory `path`: opens `trace.ron`
    /// there and writes the opening bracket of the RON action list.
    pub fn new(path: &std::path::Path) -> Result<Self, std::io::Error> {
        log::info!("Tracing into '{:?}'", path);
        let mut file = std::fs::File::create(path.join(FILE_NAME))?;
        file.write_all(b"[\n")?;
        Ok(Self {
            path: path.to_path_buf(),
            file,
            config: ron::ser::PrettyConfig::default(),
            binary_id: 0,
        })
    }

    /// Store `data` in a new `data<N>.<kind>` file next to the trace and
    /// return that file name. Write failures are intentionally ignored.
    pub fn make_binary(&mut self, kind: &str, data: &[u8]) -> String {
        self.binary_id += 1;
        let name = format!("data{}.{}", self.binary_id, kind);
        let _ = std::fs::write(self.path.join(&name), data);
        name
    }

    /// Append one action to the RON list. Serialization failures are logged
    /// and the action is dropped rather than aborting the trace.
    pub(crate) fn add(&mut self, action: Action) {
        // `to_string_pretty` takes the config by value, hence the clone.
        match ron::ser::to_string_pretty(&action, self.config.clone()) {
            Ok(string) => {
                let _ = writeln!(self.file, "{},", string);
            }
            Err(e) => {
                log::warn!("RON serialization failure: {:?}", e);
            }
        }
    }
}
|
||||
|
||||
#[cfg(feature = "trace")]
impl Drop for Trace {
    /// Close the RON list opened by `Trace::new`. The write result is
    /// discarded: a destructor has no way to report failure.
    fn drop(&mut self) {
        let _ = Write::write_all(&mut self.file, b"]");
    }
}
|
||||
202
third-party/vendor/wgpu-core/src/error.rs
vendored
Normal file
202
third-party/vendor/wgpu-core/src/error.rs
vendored
Normal file
|
|
@ -0,0 +1,202 @@
|
|||
use core::fmt;
|
||||
use std::error::Error;
|
||||
|
||||
use crate::{gfx_select, global::Global, identity::IdentityManagerFactory};
|
||||
|
||||
/// Helper that writes a pretty, multi-line rendering of an error chain,
/// resolving resource labels through the global registry.
pub struct ErrorFormatter<'a> {
    writer: &'a mut dyn fmt::Write,
    global: &'a Global<IdentityManagerFactory>,
}
|
||||
|
||||
impl<'a> ErrorFormatter<'a> {
    /// Write one line for the error itself.
    pub fn error(&mut self, err: &dyn Error) {
        writeln!(self.writer, " {err}").expect("Error formatting error");
    }

    /// Write an informational note line.
    pub fn note(&mut self, note: &dyn fmt::Display) {
        writeln!(self.writer, " note: {note}").expect("Error formatting error");
    }

    /// Write a ``key = `value` `` note; skipped when either part is empty.
    pub fn label(&mut self, label_key: &str, label_value: &str) {
        if !label_key.is_empty() && !label_value.is_empty() {
            self.note(&format!("{label_key} = `{label_value}`"));
        }
    }

    /// Note the label of the given bind group.
    /// `gfx_select!` dispatches on the backend encoded in the id.
    pub fn bind_group_label(&mut self, id: &crate::id::BindGroupId) {
        let global = self.global;
        let label: String = gfx_select!(id => global.bind_group_label(*id));
        self.label("bind group", &label);
    }

    /// Note the label of the given bind group layout.
    pub fn bind_group_layout_label(&mut self, id: &crate::id::BindGroupLayoutId) {
        let global = self.global;
        let label: String = gfx_select!(id => global.bind_group_layout_label(*id));
        self.label("bind group layout", &label);
    }

    /// Note the label of the given render pipeline.
    pub fn render_pipeline_label(&mut self, id: &crate::id::RenderPipelineId) {
        let global = self.global;
        let label: String = gfx_select!(id => global.render_pipeline_label(*id));
        self.label("render pipeline", &label);
    }

    /// Note the label of the given compute pipeline.
    pub fn compute_pipeline_label(&mut self, id: &crate::id::ComputePipelineId) {
        let global = self.global;
        let label: String = gfx_select!(id => global.compute_pipeline_label(*id));
        self.label("compute pipeline", &label);
    }

    /// Note a buffer label under a caller-chosen key (e.g. "source buffer").
    pub fn buffer_label_with_key(&mut self, id: &crate::id::BufferId, key: &str) {
        let global = self.global;
        let label: String = gfx_select!(id => global.buffer_label(*id));
        self.label(key, &label);
    }

    /// Note a buffer label under the default "buffer" key.
    pub fn buffer_label(&mut self, id: &crate::id::BufferId) {
        self.buffer_label_with_key(id, "buffer");
    }

    /// Note a texture label under a caller-chosen key.
    pub fn texture_label_with_key(&mut self, id: &crate::id::TextureId, key: &str) {
        let global = self.global;
        let label: String = gfx_select!(id => global.texture_label(*id));
        self.label(key, &label);
    }

    /// Note a texture label under the default "texture" key.
    pub fn texture_label(&mut self, id: &crate::id::TextureId) {
        self.texture_label_with_key(id, "texture");
    }

    /// Note a texture view label under a caller-chosen key.
    pub fn texture_view_label_with_key(&mut self, id: &crate::id::TextureViewId, key: &str) {
        let global = self.global;
        let label: String = gfx_select!(id => global.texture_view_label(*id));
        self.label(key, &label);
    }

    /// Note a texture view label under the default "texture view" key.
    pub fn texture_view_label(&mut self, id: &crate::id::TextureViewId) {
        self.texture_view_label_with_key(id, "texture view");
    }

    /// Note the label of the given sampler.
    pub fn sampler_label(&mut self, id: &crate::id::SamplerId) {
        let global = self.global;
        let label: String = gfx_select!(id => global.sampler_label(*id));
        self.label("sampler", &label);
    }

    /// Note the label of the given command buffer.
    pub fn command_buffer_label(&mut self, id: &crate::id::CommandBufferId) {
        let global = self.global;
        let label: String = gfx_select!(id => global.command_buffer_label(*id));
        self.label("command buffer", &label);
    }

    /// Note the label of the given query set.
    pub fn query_set_label(&mut self, id: &crate::id::QuerySetId) {
        let global = self.global;
        let label: String = gfx_select!(id => global.query_set_label(*id));
        self.label("query set", &label);
    }
}
|
||||
|
||||
/// Trait for errors that can render extra context (labels, notes) beyond
/// their `Display` output. The default implementation just prints the
/// error line itself.
pub trait PrettyError: Error + Sized {
    fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
        fmt.error(self);
    }
}
|
||||
|
||||
/// Pretty-print an arbitrary error into `writer`.
///
/// Tries to downcast `error` to each known [`PrettyError`] implementor so it
/// can render type-specific context (resource labels, notes); falls back to
/// a plain one-line rendering when no known type matches.
pub fn format_pretty_any(
    writer: &mut dyn fmt::Write,
    global: &Global<IdentityManagerFactory>,
    error: &(dyn Error + 'static),
) {
    let mut fmt = ErrorFormatter { writer, global };

    if let Some(pretty_err) = error.downcast_ref::<ContextError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }

    if let Some(pretty_err) = error.downcast_ref::<crate::command::RenderCommandError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::binding_model::CreateBindGroupError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) =
        error.downcast_ref::<crate::binding_model::CreatePipelineLayoutError>()
    {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::ExecutionError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::RenderPassErrorInner>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::RenderPassError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::ComputePassErrorInner>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::ComputePassError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::RenderBundleError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::TransferError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::PassErrorScope>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::track::UsageConflict>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }
    if let Some(pretty_err) = error.downcast_ref::<crate::command::QueryError>() {
        return pretty_err.fmt_pretty(&mut fmt);
    }

    // default: no known pretty-printable type matched
    fmt.error(error)
}
|
||||
|
||||
/// Generic error wrapper recording which API call failed (`string`), the
/// underlying cause, and an optional resource label for context.
#[derive(Debug)]
pub struct ContextError {
    /// Name of the failing operation; rendered as "In <string>".
    pub string: &'static str,
    // On native targets (or wasm with the fragile send/sync feature and no
    // atomics) the cause is required to be Send + Sync.
    #[cfg(any(
        not(target_arch = "wasm32"),
        all(
            feature = "fragile-send-sync-non-atomic-wasm",
            not(target_feature = "atomics")
        )
    ))]
    pub cause: Box<dyn Error + Send + Sync + 'static>,
    // Otherwise a non-Send/Sync cause is accepted.
    #[cfg(not(any(
        not(target_arch = "wasm32"),
        all(
            feature = "fragile-send-sync-non-atomic-wasm",
            not(target_feature = "atomics")
        )
    )))]
    pub cause: Box<dyn Error + 'static>,
    // Key/value pair rendered as a note by `fmt_pretty` (skipped when empty).
    pub label_key: &'static str,
    pub label: String,
}
|
||||
|
||||
impl PrettyError for ContextError {
    /// Print the error line, then the attached label (if both key and
    /// value are non-empty).
    fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
        fmt.error(self);
        fmt.label(self.label_key, &self.label);
    }
}
|
||||
|
||||
impl fmt::Display for ContextError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "In {}", self.string)
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for ContextError {
    /// Expose the wrapped cause so error-chain walkers can descend into it.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(self.cause.as_ref())
    }
}
|
||||
171
third-party/vendor/wgpu-core/src/global.rs
vendored
Normal file
171
third-party/vendor/wgpu-core/src/global.rs
vendored
Normal file
|
|
@ -0,0 +1,171 @@
|
|||
use crate::{
|
||||
hal_api::HalApi,
|
||||
hub::{HubReport, Hubs},
|
||||
id,
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
instance::{Instance, Surface},
|
||||
registry::Registry,
|
||||
storage::{Element, StorageReport},
|
||||
};
|
||||
|
||||
/// Snapshot of registry usage across all compiled-in backends, for
/// diagnostics. A backend's entry is `None` when its instance was not
/// created (see `Global::generate_report`).
#[derive(Debug)]
pub struct GlobalReport {
    pub surfaces: StorageReport,
    #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
    pub vulkan: Option<HubReport>,
    #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
    pub metal: Option<HubReport>,
    #[cfg(all(feature = "dx12", windows))]
    pub dx12: Option<HubReport>,
    #[cfg(all(feature = "dx11", windows))]
    pub dx11: Option<HubReport>,
    #[cfg(feature = "gles")]
    pub gl: Option<HubReport>,
}
|
||||
|
||||
/// Root object owning the instance, the surface registry, and the
/// per-backend hubs that hold all other resources.
pub struct Global<G: GlobalIdentityHandlerFactory> {
    pub instance: Instance,
    pub surfaces: Registry<Surface, id::SurfaceId, G>,
    pub(crate) hubs: Hubs<G>,
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
    /// Create a fresh global with a newly created instance.
    pub fn new(name: &str, factory: G, instance_desc: wgt::InstanceDescriptor) -> Self {
        profiling::scope!("Global::new");
        Self {
            instance: Instance::new(name, instance_desc),
            surfaces: Registry::without_backend(&factory, "Surface"),
            hubs: Hubs::new(&factory),
        }
    }

    /// Create a global wrapping an existing hal instance for backend `A`.
    ///
    /// # Safety
    ///
    /// Refer to the creation of wgpu-hal Instance for every backend.
    pub unsafe fn from_hal_instance<A: HalApi>(
        name: &str,
        factory: G,
        hal_instance: A::Instance,
    ) -> Self {
        profiling::scope!("Global::new");
        Self {
            instance: A::create_instance_from_hal(name, hal_instance),
            surfaces: Registry::without_backend(&factory, "Surface"),
            hubs: Hubs::new(&factory),
        }
    }

    /// Borrow backend `A`'s raw hal instance, if it was created.
    ///
    /// # Safety
    ///
    /// - The raw instance handle returned must not be manually destroyed.
    pub unsafe fn instance_as_hal<A: HalApi>(&self) -> Option<&A::Instance> {
        A::instance_as_hal(&self.instance)
    }

    /// Create a global around an already-constructed wgpu-core `Instance`.
    ///
    /// # Safety
    ///
    /// - The raw handles obtained from the Instance must not be manually destroyed
    pub unsafe fn from_instance(factory: G, instance: Instance) -> Self {
        profiling::scope!("Global::new");
        Self {
            instance,
            surfaces: Registry::without_backend(&factory, "Surface"),
            hubs: Hubs::new(&factory),
        }
    }

    /// Clear all resources of backend `A`'s hub, keeping adapters alive
    /// (the `false` argument) — used by tests.
    pub fn clear_backend<A: HalApi>(&self, _dummy: ()) {
        let mut surface_guard = self.surfaces.data.write();
        let hub = A::hub(self);
        // this is used for tests, which keep the adapter
        hub.clear(&mut surface_guard, false);
    }

    /// Collect a usage report for the surface registry and for every
    /// compiled-in backend whose instance exists.
    pub fn generate_report(&self) -> GlobalReport {
        GlobalReport {
            surfaces: self.surfaces.data.read().generate_report(),
            #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
            vulkan: if self.instance.vulkan.is_some() {
                Some(self.hubs.vulkan.generate_report())
            } else {
                None
            },
            #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
            metal: if self.instance.metal.is_some() {
                Some(self.hubs.metal.generate_report())
            } else {
                None
            },
            #[cfg(all(feature = "dx12", windows))]
            dx12: if self.instance.dx12.is_some() {
                Some(self.hubs.dx12.generate_report())
            } else {
                None
            },
            #[cfg(all(feature = "dx11", windows))]
            dx11: if self.instance.dx11.is_some() {
                Some(self.hubs.dx11.generate_report())
            } else {
                None
            },
            #[cfg(feature = "gles")]
            gl: if self.instance.gl.is_some() {
                Some(self.hubs.gl.generate_report())
            } else {
                None
            },
        }
    }
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Drop for Global<G> {
    /// Tear down every backend hub (with adapters, `true`) before the
    /// instance is dropped, then destroy any remaining surfaces.
    fn drop(&mut self) {
        profiling::scope!("Global::drop");
        log::info!("Dropping Global");
        let mut surface_guard = self.surfaces.data.write();

        // destroy hubs before the instance gets dropped
        #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
        {
            self.hubs.vulkan.clear(&mut surface_guard, true);
        }
        #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
        {
            self.hubs.metal.clear(&mut surface_guard, true);
        }
        #[cfg(all(feature = "dx12", windows))]
        {
            self.hubs.dx12.clear(&mut surface_guard, true);
        }
        #[cfg(all(feature = "dx11", windows))]
        {
            self.hubs.dx11.clear(&mut surface_guard, true);
        }
        #[cfg(feature = "gles")]
        {
            self.hubs.gl.clear(&mut surface_guard, true);
        }

        // destroy surfaces
        for element in surface_guard.map.drain(..) {
            if let Element::Occupied(surface, _) = element {
                self.instance.destroy_surface(surface);
            }
        }
    }
}
|
||||
|
||||
// Compile-time check that `Global` is Send + Sync on the targets where that
// is expected (native, or wasm with the fragile send/sync feature and no
// atomics). Never called; existence alone enforces the bounds.
#[cfg(all(
    test,
    any(
        not(target_arch = "wasm32"),
        all(
            feature = "fragile-send-sync-non-atomic-wasm",
            not(target_feature = "atomics")
        )
    )
))]
fn _test_send_sync(global: &Global<crate::identity::IdentityManagerFactory>) {
    fn test_internal<T: Send + Sync>(_: T) {}
    test_internal(global)
}
|
||||
157
third-party/vendor/wgpu-core/src/hal_api.rs
vendored
Normal file
157
third-party/vendor/wgpu-core/src/hal_api.rs
vendored
Normal file
|
|
@ -0,0 +1,157 @@
|
|||
use wgt::Backend;
|
||||
|
||||
use crate::{
|
||||
global::Global,
|
||||
hub::Hub,
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
instance::{HalSurface, Instance, Surface},
|
||||
};
|
||||
|
||||
/// Backend-selection trait: connects a `hal::Api` to its per-backend slots
/// in `Instance`, `Surface`, and the `Global`'s hubs.
pub trait HalApi: hal::Api {
    /// The `wgt::Backend` variant this API corresponds to.
    const VARIANT: Backend;
    /// Wrap an existing hal instance of this backend in a wgpu-core `Instance`.
    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance;
    /// Borrow this backend's hal instance out of `instance`, if created.
    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>;
    /// Select this backend's hub from the global.
    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G>;
    /// Borrow this backend's surface data, if present.
    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>>;
    /// Mutably borrow this backend's surface data, if present.
    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>>;
}
|
||||
|
||||
/// Placeholder backend: every method panics. Ids carrying `Backend::Empty`
/// must never be dispatched through these.
impl HalApi for hal::api::Empty {
    const VARIANT: Backend = Backend::Empty;
    fn create_instance_from_hal(_: &str, _: Self::Instance) -> Instance {
        unimplemented!("called empty api")
    }
    fn instance_as_hal(_: &Instance) -> Option<&Self::Instance> {
        unimplemented!("called empty api")
    }
    fn hub<G: GlobalIdentityHandlerFactory>(_: &Global<G>) -> &Hub<Self, G> {
        unimplemented!("called empty api")
    }
    fn get_surface(_: &Surface) -> Option<&HalSurface<Self>> {
        unimplemented!("called empty api")
    }
    fn get_surface_mut(_: &mut Surface) -> Option<&mut HalSurface<Self>> {
        unimplemented!("called empty api")
    }
}
|
||||
|
||||
/// Vulkan backend plumbing: selects the `vulkan` slots of `Instance`,
/// `Surface`, and the hubs.
#[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
impl HalApi for hal::api::Vulkan {
    const VARIANT: Backend = Backend::Vulkan;
    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance {
        Instance {
            name: name.to_owned(),
            vulkan: Some(hal_instance),
            ..Default::default()
        }
    }
    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
        instance.vulkan.as_ref()
    }
    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
        &global.hubs.vulkan
    }
    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>> {
        surface.vulkan.as_ref()
    }
    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>> {
        surface.vulkan.as_mut()
    }
}
|
||||
|
||||
/// Metal backend plumbing: selects the `metal` slots of `Instance`,
/// `Surface`, and the hubs.
#[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
impl HalApi for hal::api::Metal {
    const VARIANT: Backend = Backend::Metal;
    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance {
        Instance {
            name: name.to_owned(),
            metal: Some(hal_instance),
            ..Default::default()
        }
    }
    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
        instance.metal.as_ref()
    }
    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
        &global.hubs.metal
    }
    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>> {
        surface.metal.as_ref()
    }
    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>> {
        surface.metal.as_mut()
    }
}
|
||||
|
||||
/// DirectX 12 backend plumbing: selects the `dx12` slots of `Instance`,
/// `Surface`, and the hubs.
#[cfg(all(feature = "dx12", windows))]
impl HalApi for hal::api::Dx12 {
    const VARIANT: Backend = Backend::Dx12;
    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance {
        Instance {
            name: name.to_owned(),
            dx12: Some(hal_instance),
            ..Default::default()
        }
    }
    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
        instance.dx12.as_ref()
    }
    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
        &global.hubs.dx12
    }
    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>> {
        surface.dx12.as_ref()
    }
    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>> {
        surface.dx12.as_mut()
    }
}
|
||||
|
||||
/// DirectX 11 backend plumbing: selects the `dx11` slots of `Instance`,
/// `Surface`, and the hubs.
#[cfg(all(feature = "dx11", windows))]
impl HalApi for hal::api::Dx11 {
    const VARIANT: Backend = Backend::Dx11;
    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance {
        Instance {
            name: name.to_owned(),
            dx11: Some(hal_instance),
            ..Default::default()
        }
    }
    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
        instance.dx11.as_ref()
    }
    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
        &global.hubs.dx11
    }
    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>> {
        surface.dx11.as_ref()
    }
    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>> {
        surface.dx11.as_mut()
    }
}
|
||||
|
||||
/// OpenGL ES backend plumbing: selects the `gl` slots of `Instance`,
/// `Surface`, and the hubs.
#[cfg(feature = "gles")]
impl HalApi for hal::api::Gles {
    const VARIANT: Backend = Backend::Gl;
    fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance {
        // needless_update: when gles is the only enabled backend the struct
        // update below has nothing left to fill in.
        #[allow(clippy::needless_update)]
        Instance {
            name: name.to_owned(),
            gl: Some(hal_instance),
            ..Default::default()
        }
    }
    fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
        instance.gl.as_ref()
    }
    fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
        &global.hubs.gl
    }
    fn get_surface(surface: &Surface) -> Option<&HalSurface<Self>> {
        surface.gl.as_ref()
    }
    fn get_surface_mut(surface: &mut Surface) -> Option<&mut HalSurface<Self>> {
        surface.gl.as_mut()
    }
}
|
||||
728
third-party/vendor/wgpu-core/src/hub.rs
vendored
Normal file
728
third-party/vendor/wgpu-core/src/hub.rs
vendored
Normal file
|
|
@ -0,0 +1,728 @@
|
|||
/*! Allocating resource ids, and tracking the resources they refer to.
|
||||
|
||||
The `wgpu_core` API uses identifiers of type [`Id<R>`] to refer to
|
||||
resources of type `R`. For example, [`id::DeviceId`] is an alias for
|
||||
`Id<Device<Empty>>`, and [`id::BufferId`] is an alias for
|
||||
`Id<Buffer<Empty>>`. `Id` implements `Copy`, `Hash`, `Eq`, `Ord`, and
|
||||
of course `Debug`.
|
||||
|
||||
Each `Id` contains not only an index for the resource it denotes but
|
||||
also a [`Backend`] indicating which `wgpu` backend it belongs to. You
|
||||
can use the [`gfx_select`] macro to dynamically dispatch on an id's
|
||||
backend to a function specialized at compile time for a specific
|
||||
backend. See that macro's documentation for details.
|
||||
|
||||
`Id`s also incorporate a generation number, for additional validation.
|
||||
|
||||
The resources to which identifiers refer are freed explicitly.
|
||||
Attempting to use an identifier for a resource that has been freed
|
||||
elicits an error result.
|
||||
|
||||
## Assigning ids to resources
|
||||
|
||||
The users of `wgpu_core` generally want resource ids to be assigned
|
||||
in one of two ways:
|
||||
|
||||
- Users like `wgpu` want `wgpu_core` to assign ids to resources itself.
|
||||
For example, `wgpu` expects to call `Global::device_create_buffer`
|
||||
and have the return value indicate the newly created buffer's id.
|
||||
|
||||
- Users like `player` and Firefox want to allocate ids themselves, and
|
||||
pass `Global::device_create_buffer` and friends the id to assign the
|
||||
new resource.
|
||||
|
||||
To accommodate either pattern, `wgpu_core` methods that create
|
||||
resources all expect an `id_in` argument that the caller can use to
|
||||
specify the id, and they all return the id used. For example, the
|
||||
declaration of `Global::device_create_buffer` looks like this:
|
||||
|
||||
```ignore
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
||||
/* ... */
|
||||
pub fn device_create_buffer<A: HalApi>(
|
||||
&self,
|
||||
device_id: id::DeviceId,
|
||||
desc: &resource::BufferDescriptor,
|
||||
id_in: Input<G, id::BufferId>,
|
||||
) -> (id::BufferId, Option<resource::CreateBufferError>) {
|
||||
/* ... */
|
||||
}
|
||||
/* ... */
|
||||
}
|
||||
```
|
||||
|
||||
Users that want to assign resource ids themselves pass in the id they
|
||||
want as the `id_in` argument, whereas users that want `wgpu_core`
|
||||
itself to choose ids always pass `()`. In either case, the id
|
||||
ultimately assigned is returned as the first element of the tuple.
|
||||
|
||||
Producing true identifiers from `id_in` values is the job of an
|
||||
[`IdentityHandler`] implementation, which has an associated type
|
||||
[`Input`] saying what type of `id_in` values it accepts, and a
|
||||
[`process`] method that turns such values into true identifiers of
|
||||
type `I`. There are two kinds of `IdentityHandler`s:
|
||||
|
||||
- Users that want `wgpu_core` to assign ids generally use
|
||||
[`IdentityManager`] ([wrapped in a mutex]). Its `Input` type is
|
||||
`()`, and it tracks assigned ids and generation numbers as
|
||||
necessary. (This is what `wgpu` does.)
|
||||
|
||||
- Users that want to assign ids themselves use an `IdentityHandler`
|
||||
whose `Input` type is `I` itself, and whose `process` method simply
|
||||
passes the `id_in` argument through unchanged. For example, the
|
||||
`player` crate uses an `IdentityPassThrough` type whose `process`
|
||||
method simply adjusts the id's backend (since recordings can be
|
||||
replayed on a different backend than the one they were created on)
|
||||
but passes the rest of the id's content through unchanged.
|
||||
|
||||
Because an `IdentityHandler<I>` can only create ids for a single
|
||||
resource type `I`, constructing a [`Global`] entails constructing a
|
||||
separate `IdentityHandler<I>` for each resource type `I` that the
|
||||
`Global` will manage: an `IdentityHandler<DeviceId>`, an
|
||||
`IdentityHandler<TextureId>`, and so on.
|
||||
|
||||
The [`Global::new`] function could simply take a large collection of
|
||||
`IdentityHandler<I>` implementations as arguments, but that would be
|
||||
ungainly. Instead, `Global::new` expects a `factory` argument that
|
||||
implements the [`GlobalIdentityHandlerFactory`] trait, which extends
|
||||
[`IdentityHandlerFactory<I>`] for each resource id type `I`. This
|
||||
trait, in turn, has a `spawn` method that constructs an
|
||||
`IdentityHandler<I>` for the `Global` to use.
|
||||
|
||||
What this means is that the types of resource creation functions'
|
||||
`id_in` arguments depend on the `Global`'s `G` type parameter. A
|
||||
`Global<G>`'s `IdentityHandler<I>` implementation is:
|
||||
|
||||
```ignore
|
||||
<G as IdentityHandlerFactory<I>>::Filter
|
||||
```
|
||||
|
||||
where `Filter` is an associated type of the `IdentityHandlerFactory` trait.
|
||||
Thus, its `id_in` type is:
|
||||
|
||||
```ignore
|
||||
<<G as IdentityHandlerFactory<I>>::Filter as IdentityHandler<I>>::Input
|
||||
```
|
||||
|
||||
The [`Input<G, I>`] type is an alias for this construction.
|
||||
|
||||
## Id allocation and streaming
|
||||
|
||||
Perhaps surprisingly, allowing users to assign resource ids themselves
|
||||
enables major performance improvements in some applications.
|
||||
|
||||
The `wgpu_core` API is designed for use by Firefox's [WebGPU]
|
||||
implementation. For security, web content and GPU use must be kept
|
||||
segregated in separate processes, with all interaction between them
|
||||
mediated by an inter-process communication protocol. As web content uses
|
||||
the WebGPU API, the content process sends messages to the GPU process,
|
||||
which interacts with the platform's GPU APIs on content's behalf,
|
||||
occasionally sending results back.
|
||||
|
||||
In a classic Rust API, a resource allocation function takes parameters
|
||||
describing the resource to create, and if creation succeeds, it returns
|
||||
the resource id in a `Result::Ok` value. However, this design is a poor
|
||||
fit for the split-process design described above: content must wait for
|
||||
the reply to its buffer-creation message (say) before it can know which
|
||||
id it can use in the next message that uses that buffer. On a common
|
||||
usage pattern, the classic Rust design imposes the latency of a full
|
||||
cross-process round trip.
|
||||
|
||||
We can avoid incurring these round-trip latencies simply by letting the
|
||||
content process assign resource ids itself. With this approach, content
|
||||
can choose an id for the new buffer, send a message to create the
|
||||
buffer, and then immediately send the next message operating on that
|
||||
buffer, since it already knows its id. Allowing content and GPU process
|
||||
activity to be pipelined greatly improves throughput.
|
||||
|
||||
To help propagate errors correctly in this style of usage, when resource
|
||||
creation fails, the id supplied for that resource is marked to indicate
|
||||
as much, allowing subsequent operations using that id to be properly
|
||||
flagged as errors as well.
|
||||
|
||||
[`Backend`]: wgt::Backend
|
||||
[`Global`]: crate::global::Global
|
||||
[`Global::new`]: crate::global::Global::new
|
||||
[`gfx_select`]: crate::gfx_select
|
||||
[`IdentityHandler`]: crate::identity::IdentityHandler
|
||||
[`Input`]: crate::identity::IdentityHandler::Input
|
||||
[`process`]: crate::identity::IdentityHandler::process
|
||||
[`Id<R>`]: crate::id::Id
|
||||
[wrapped in a mutex]: ../identity/trait.IdentityHandler.html#impl-IdentityHandler%3CI%3E-for-Mutex%3CIdentityManager%3E
|
||||
[WebGPU]: https://www.w3.org/TR/webgpu/
|
||||
[`IdentityManager`]: crate::identity::IdentityManager
|
||||
[`Input<G, I>`]: crate::identity::Input
|
||||
[`IdentityHandlerFactory<I>`]: crate::identity::IdentityHandlerFactory
|
||||
*/
|
||||
|
||||
use crate::{
|
||||
binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
|
||||
command::{CommandBuffer, RenderBundle},
|
||||
device::Device,
|
||||
hal_api::HalApi,
|
||||
id,
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
instance::{Adapter, HalSurface, Instance, Surface},
|
||||
pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
|
||||
registry::Registry,
|
||||
resource::{Buffer, QuerySet, Sampler, StagingBuffer, Texture, TextureClearMode, TextureView},
|
||||
storage::{Element, Storage, StorageReport},
|
||||
};
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
use std::cell::Cell;
|
||||
use std::{fmt::Debug, marker::PhantomData};
|
||||
|
||||
/// Type system for enforcing the lock order on [`Hub`] fields.
|
||||
///
|
||||
/// If type `A` implements `Access<B>`, that means we are allowed to
|
||||
/// proceed with locking resource `B` after we lock `A`.
|
||||
///
|
||||
/// The implementations of `Access` basically describe the edges in an
|
||||
/// acyclic directed graph of lock transitions. As long as it doesn't have
|
||||
/// cycles, any number of threads can acquire locks along paths through
|
||||
/// the graph without deadlock. That is, if you look at each thread's
|
||||
/// lock acquisitions as steps along a path in the graph, then because
|
||||
/// there are no cycles in the graph, there must always be some thread
|
||||
/// that is able to acquire its next lock, or that is about to release
|
||||
/// a lock. (Assume that no thread just sits on its locks forever.)
|
||||
///
|
||||
/// Locks must be acquired in the following order:
|
||||
///
|
||||
/// - [`Adapter`]
|
||||
/// - [`Device`]
|
||||
/// - [`CommandBuffer`]
|
||||
/// - [`RenderBundle`]
|
||||
/// - [`PipelineLayout`]
|
||||
/// - [`BindGroupLayout`]
|
||||
/// - [`BindGroup`]
|
||||
/// - [`ComputePipeline`]
|
||||
/// - [`RenderPipeline`]
|
||||
/// - [`ShaderModule`]
|
||||
/// - [`Buffer`]
|
||||
/// - [`StagingBuffer`]
|
||||
/// - [`Texture`]
|
||||
/// - [`TextureView`]
|
||||
/// - [`Sampler`]
|
||||
/// - [`QuerySet`]
|
||||
///
|
||||
/// That is, you may only acquire a new lock on a `Hub` field if it
|
||||
/// appears in the list after all the other fields you're already
|
||||
/// holding locks for. When you are holding no locks, you can start
|
||||
/// anywhere.
|
||||
///
|
||||
/// It's fine to add more `Access` implementations as needed, as long
|
||||
/// as you do not introduce a cycle. In other words, as long as there
|
||||
/// is some ordering you can put the resource types in that respects
|
||||
/// the extant `Access` implementations, that's fine.
|
||||
///
|
||||
/// See the documentation for [`Hub`] for more details.
|
||||
pub trait Access<A> {}

/// The starting point of the lock ordering: a marker type meaning
/// "no [`Hub`] locks held yet". It is never instantiated.
pub enum Root {}

// These impls are arranged so that the target types (that is, the `T`
// in `Access<T>`) appear in locking order.
//
// TODO: establish an order instead of declaring all the pairs.

// Instance / Surface / Adapter / Device — top of the lock order.
impl Access<Instance> for Root {}
impl Access<Surface> for Root {}
impl Access<Surface> for Instance {}
impl<A: HalApi> Access<Adapter<A>> for Root {}
impl<A: HalApi> Access<Adapter<A>> for Surface {}
impl<A: HalApi> Access<Device<A>> for Root {}
impl<A: HalApi> Access<Device<A>> for Surface {}
impl<A: HalApi> Access<Device<A>> for Adapter<A> {}

// Command recording and binding resources.
impl<A: HalApi> Access<CommandBuffer<A>> for Root {}
impl<A: HalApi> Access<CommandBuffer<A>> for Device<A> {}
impl<A: HalApi> Access<RenderBundle<A>> for Device<A> {}
impl<A: HalApi> Access<RenderBundle<A>> for CommandBuffer<A> {}
impl<A: HalApi> Access<PipelineLayout<A>> for Root {}
impl<A: HalApi> Access<PipelineLayout<A>> for Device<A> {}
impl<A: HalApi> Access<PipelineLayout<A>> for RenderBundle<A> {}
impl<A: HalApi> Access<BindGroupLayout<A>> for Root {}
impl<A: HalApi> Access<BindGroupLayout<A>> for Device<A> {}
impl<A: HalApi> Access<BindGroupLayout<A>> for PipelineLayout<A> {}
impl<A: HalApi> Access<BindGroup<A>> for Root {}
impl<A: HalApi> Access<BindGroup<A>> for Device<A> {}
impl<A: HalApi> Access<BindGroup<A>> for BindGroupLayout<A> {}
impl<A: HalApi> Access<BindGroup<A>> for PipelineLayout<A> {}
impl<A: HalApi> Access<BindGroup<A>> for CommandBuffer<A> {}

// Pipelines and shader modules.
impl<A: HalApi> Access<ComputePipeline<A>> for Device<A> {}
impl<A: HalApi> Access<ComputePipeline<A>> for BindGroup<A> {}
impl<A: HalApi> Access<RenderPipeline<A>> for Device<A> {}
impl<A: HalApi> Access<RenderPipeline<A>> for BindGroup<A> {}
impl<A: HalApi> Access<RenderPipeline<A>> for ComputePipeline<A> {}
impl<A: HalApi> Access<ShaderModule<A>> for Device<A> {}
impl<A: HalApi> Access<ShaderModule<A>> for BindGroupLayout<A> {}

// Buffers, textures, views, samplers, query sets — bottom of the order.
impl<A: HalApi> Access<Buffer<A>> for Root {}
impl<A: HalApi> Access<Buffer<A>> for Device<A> {}
impl<A: HalApi> Access<Buffer<A>> for BindGroupLayout<A> {}
impl<A: HalApi> Access<Buffer<A>> for BindGroup<A> {}
impl<A: HalApi> Access<Buffer<A>> for CommandBuffer<A> {}
impl<A: HalApi> Access<Buffer<A>> for ComputePipeline<A> {}
impl<A: HalApi> Access<Buffer<A>> for RenderPipeline<A> {}
impl<A: HalApi> Access<Buffer<A>> for QuerySet<A> {}
impl<A: HalApi> Access<StagingBuffer<A>> for Device<A> {}
impl<A: HalApi> Access<Texture<A>> for Root {}
impl<A: HalApi> Access<Texture<A>> for Device<A> {}
impl<A: HalApi> Access<Texture<A>> for Buffer<A> {}
impl<A: HalApi> Access<TextureView<A>> for Root {}
impl<A: HalApi> Access<TextureView<A>> for Device<A> {}
impl<A: HalApi> Access<TextureView<A>> for Texture<A> {}
impl<A: HalApi> Access<Sampler<A>> for Root {}
impl<A: HalApi> Access<Sampler<A>> for Device<A> {}
impl<A: HalApi> Access<Sampler<A>> for TextureView<A> {}
impl<A: HalApi> Access<QuerySet<A>> for Root {}
impl<A: HalApi> Access<QuerySet<A>> for Device<A> {}
impl<A: HalApi> Access<QuerySet<A>> for CommandBuffer<A> {}
impl<A: HalApi> Access<QuerySet<A>> for RenderPipeline<A> {}
impl<A: HalApi> Access<QuerySet<A>> for ComputePipeline<A> {}
impl<A: HalApi> Access<QuerySet<A>> for Sampler<A> {}
|
||||
|
||||
#[cfg(debug_assertions)]
thread_local! {
    /// Per-thread state checking `Token<Root>` creation in debug builds.
    ///
    /// This is the number of `Token` values alive on the current
    /// thread. Since `Token` creation respects the [`Access`] graph,
    /// there can never be more tokens alive than there are fields of
    /// [`Hub`], so a `u8` is plenty.
    ///
    /// Compiled only under `debug_assertions`; release builds carry no
    /// per-thread counter at all.
    static ACTIVE_TOKEN: Cell<u8> = Cell::new(0);
}
|
||||
|
||||
/// A zero-size permission token to lock some fields of [`Hub`].
|
||||
///
|
||||
/// Access to a `Token<T>` grants permission to lock any field of
|
||||
/// [`Hub`] following the one of type [`Registry<T, ...>`], where
|
||||
/// "following" is as defined by the [`Access`] implementations.
|
||||
///
|
||||
/// Calling [`Token::root()`] returns a `Token<Root>`, which grants
|
||||
/// permission to lock any field. Dynamic checks ensure that each
|
||||
/// thread has at most one `Token<Root>` live at a time, in debug
|
||||
/// builds.
|
||||
///
|
||||
/// The locking methods on `Registry<T, ...>` take a `&'t mut
|
||||
/// Token<A>`, and return a fresh `Token<'t, T>` and a lock guard with
|
||||
/// lifetime `'t`, so the caller cannot access their `Token<A>` again
|
||||
/// until they have dropped both the `Token<T>` and the lock guard.
|
||||
///
|
||||
/// Tokens are `!Send`, so one thread can't send its permissions to
|
||||
/// another.
|
||||
pub(crate) struct Token<'a, T: 'a> {
    // The `*const` makes us `!Send` and `!Sync`.
    // No data is stored: the lifetime and `T` exist purely to encode the
    // current lock level in the type system.
    level: PhantomData<&'a *const T>,
}
|
||||
|
||||
impl<'a, T> Token<'a, T> {
    /// Return a new token for a locked field.
    ///
    /// This should only be used by `Registry` locking methods.
    pub(crate) fn new() -> Self {
        // Debug builds: bump the per-thread live-token count. The count must
        // already be non-zero — a non-root token may only be created while a
        // parent token (ultimately `Token<Root>`) is still alive.
        #[cfg(debug_assertions)]
        ACTIVE_TOKEN.with(|active| {
            let old = active.get();
            assert_ne!(old, 0, "Root token was dropped");
            active.set(old + 1);
        });
        Self { level: PhantomData }
    }
}
|
||||
|
||||
impl Token<'static, Root> {
    /// Return a `Token<Root>`, granting permission to lock any [`Hub`] field.
    ///
    /// Debug builds check dynamically that each thread has at most
    /// one root token at a time.
    pub fn root() -> Self {
        // Swap the counter from 0 to 1; a non-zero previous value means a
        // root token is already live on this thread.
        #[cfg(debug_assertions)]
        ACTIVE_TOKEN.with(|active| {
            assert_eq!(0, active.replace(1), "Root token is already active");
        });

        Self { level: PhantomData }
    }
}
|
||||
|
||||
impl<'a, T> Drop for Token<'a, T> {
    fn drop(&mut self) {
        // Debug builds: decrement the per-thread live-token count, undoing
        // the increment performed in `Token::new` / `Token::root`.
        #[cfg(debug_assertions)]
        ACTIVE_TOKEN.with(|active| {
            let old = active.get();
            active.set(old - 1);
        });
    }
}
|
||||
|
||||
/// A snapshot of resource counts for one backend's [`Hub`]:
/// one [`StorageReport`] per registry.
#[derive(Debug)]
pub struct HubReport {
    pub adapters: StorageReport,
    pub devices: StorageReport,
    pub pipeline_layouts: StorageReport,
    pub shader_modules: StorageReport,
    pub bind_group_layouts: StorageReport,
    pub bind_groups: StorageReport,
    pub command_buffers: StorageReport,
    pub render_bundles: StorageReport,
    pub render_pipelines: StorageReport,
    pub compute_pipelines: StorageReport,
    pub query_sets: StorageReport,
    pub buffers: StorageReport,
    pub textures: StorageReport,
    pub texture_views: StorageReport,
    pub samplers: StorageReport,
}
|
||||
|
||||
impl HubReport {
    pub fn is_empty(&self) -> bool {
        // Only the adapter registry is inspected. NOTE(review): this
        // presumably relies on every other resource requiring an adapter to
        // exist, so "no adapters" implies "nothing at all" — confirm, since
        // the other fourteen reports are not checked here.
        self.adapters.is_empty()
    }
}
|
||||
|
||||
#[allow(rustdoc::private_intra_doc_links)]
|
||||
/// All the resources for a particular backend in a [`Global`].
|
||||
///
|
||||
/// To obtain `global`'s `Hub` for some [`HalApi`] backend type `A`,
|
||||
/// call [`A::hub(global)`].
|
||||
///
|
||||
/// ## Locking
|
||||
///
|
||||
/// Each field in `Hub` is a [`Registry`] holding all the values of a
|
||||
/// particular type of resource, all protected by a single [`RwLock`].
|
||||
/// So for example, to access any [`Buffer`], you must acquire a read
|
||||
/// lock on the `Hub`s entire [`buffers`] registry. The lock guard
|
||||
/// gives you access to the `Registry`'s [`Storage`], which you can
|
||||
/// then index with the buffer's id. (Yes, this design causes
|
||||
/// contention; see [#2272].)
|
||||
///
|
||||
/// But most `wgpu` operations require access to several different
|
||||
/// kinds of resource, so you often need to hold locks on several
|
||||
/// different fields of your [`Hub`] simultaneously. To avoid
|
||||
/// deadlock, there is an ordering imposed on the fields, and you may
|
||||
/// only acquire new locks on fields that come *after* all those you
|
||||
/// are already holding locks on, in this ordering. (The ordering is
|
||||
/// described in the documentation for the [`Access`] trait.)
|
||||
///
|
||||
/// We use Rust's type system to statically check that `wgpu_core` can
|
||||
/// only ever acquire locks in the correct order:
|
||||
///
|
||||
/// - A value of type [`Token<T>`] represents proof that the owner
|
||||
/// only holds locks on the `Hub` fields holding resources of type
|
||||
/// `T` or earlier in the lock ordering. A special value of type
|
||||
/// `Token<Root>`, obtained by calling [`Token::root`], represents
|
||||
/// proof that no `Hub` field locks are held.
|
||||
///
|
||||
/// - To lock the `Hub` field holding resources of type `T`, you must
|
||||
/// call its [`read`] or [`write`] methods. These require you to
|
||||
/// pass in a `&mut Token<A>`, for some `A` that implements
|
||||
/// [`Access<T>`]. This implementation exists only if `T` follows `A`
|
||||
/// in the field ordering, which statically ensures that you are
|
||||
/// indeed allowed to lock this new `Hub` field.
|
||||
///
|
||||
/// - The locking methods return both an [`RwLock`] guard that you can
|
||||
/// use to access the field's resources, and a new `Token<T>` value.
|
||||
/// These both borrow from the lifetime of your `Token<A>`, so since
|
||||
/// you passed that by mutable reference, you cannot access it again
|
||||
/// until you drop the new token and lock guard.
|
||||
///
|
||||
/// Because a thread only ever has access to the `Token<T>` for the
|
||||
/// last resource type `T` it holds a lock for, and the `Access` trait
|
||||
/// implementations only permit acquiring locks for types `U` that
|
||||
/// follow `T` in the lock ordering, it is statically impossible for a
|
||||
/// program to violate the locking order.
|
||||
///
|
||||
/// This does assume that threads cannot call `Token<Root>` when they
|
||||
/// already hold locks (dynamically enforced in debug builds) and that
|
||||
/// threads cannot send their `Token`s to other threads (enforced by
|
||||
/// making `Token` neither `Send` nor `Sync`).
|
||||
///
|
||||
/// [`Global`]: crate::global::Global
|
||||
/// [`A::hub(global)`]: HalApi::hub
|
||||
/// [`RwLock`]: parking_lot::RwLock
|
||||
/// [`buffers`]: Hub::buffers
|
||||
/// [`read`]: Registry::read
|
||||
/// [`write`]: Registry::write
|
||||
/// [`Token<T>`]: Token
|
||||
/// [`Access<T>`]: Access
|
||||
/// [#2272]: https://github.com/gfx-rs/wgpu/pull/2272
|
||||
pub struct Hub<A: HalApi, F: GlobalIdentityHandlerFactory> {
    // One registry per resource type. The fields appear in the same order
    // as the lock ordering documented on the [`Access`] trait.
    pub adapters: Registry<Adapter<A>, id::AdapterId, F>,
    pub devices: Registry<Device<A>, id::DeviceId, F>,
    pub pipeline_layouts: Registry<PipelineLayout<A>, id::PipelineLayoutId, F>,
    pub shader_modules: Registry<ShaderModule<A>, id::ShaderModuleId, F>,
    pub bind_group_layouts: Registry<BindGroupLayout<A>, id::BindGroupLayoutId, F>,
    pub bind_groups: Registry<BindGroup<A>, id::BindGroupId, F>,
    pub command_buffers: Registry<CommandBuffer<A>, id::CommandBufferId, F>,
    pub render_bundles: Registry<RenderBundle<A>, id::RenderBundleId, F>,
    pub render_pipelines: Registry<RenderPipeline<A>, id::RenderPipelineId, F>,
    pub compute_pipelines: Registry<ComputePipeline<A>, id::ComputePipelineId, F>,
    pub query_sets: Registry<QuerySet<A>, id::QuerySetId, F>,
    pub buffers: Registry<Buffer<A>, id::BufferId, F>,
    pub staging_buffers: Registry<StagingBuffer<A>, id::StagingBufferId, F>,
    pub textures: Registry<Texture<A>, id::TextureId, F>,
    pub texture_views: Registry<TextureView<A>, id::TextureViewId, F>,
    pub samplers: Registry<Sampler<A>, id::SamplerId, F>,
}
|
||||
|
||||
impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
    /// Build a `Hub` with one empty [`Registry`] per resource type, each
    /// tagged with this backend's variant and using `factory` to construct
    /// its identity handler.
    fn new(factory: &F) -> Self {
        Self {
            adapters: Registry::new(A::VARIANT, factory),
            devices: Registry::new(A::VARIANT, factory),
            pipeline_layouts: Registry::new(A::VARIANT, factory),
            shader_modules: Registry::new(A::VARIANT, factory),
            bind_group_layouts: Registry::new(A::VARIANT, factory),
            bind_groups: Registry::new(A::VARIANT, factory),
            command_buffers: Registry::new(A::VARIANT, factory),
            render_bundles: Registry::new(A::VARIANT, factory),
            render_pipelines: Registry::new(A::VARIANT, factory),
            compute_pipelines: Registry::new(A::VARIANT, factory),
            query_sets: Registry::new(A::VARIANT, factory),
            buffers: Registry::new(A::VARIANT, factory),
            staging_buffers: Registry::new(A::VARIANT, factory),
            textures: Registry::new(A::VARIANT, factory),
            texture_views: Registry::new(A::VARIANT, factory),
            samplers: Registry::new(A::VARIANT, factory),
        }
    }

    /// Tear down every resource in this hub, destroying the underlying hal
    /// objects through their owning devices, then dispose of the devices
    /// themselves (and, if `with_adapters`, clear the adapter registry too).
    ///
    /// Destruction order matters: dependent resources (views, bind groups,
    /// pipelines, ...) are destroyed before the things they reference, and
    /// devices are disposed last.
    ///
    /// NOTE(review): `render_bundles` and `staging_buffers` are not drained
    /// here — confirm whether that is intentional or an oversight.
    //TODO: instead of having a hacky `with_adapters` parameter,
    // we should have `clear_device(device_id)` that specifically destroys
    // everything related to a logical device.
    pub(crate) fn clear(
        &self,
        surface_guard: &mut Storage<Surface, id::SurfaceId>,
        with_adapters: bool,
    ) {
        use crate::resource::TextureInner;
        use hal::{Device as _, Surface as _};

        // Hold the device registry write lock for the whole teardown; each
        // resource looks up its owning device through `devices`.
        let mut devices = self.devices.data.write();
        for element in devices.map.iter_mut() {
            if let Element::Occupied(ref mut device, _) = *element {
                device.prepare_to_die();
            }
        }

        // destroy command buffers first, since otherwise DX12 isn't happy
        for element in self.command_buffers.data.write().map.drain(..) {
            if let Element::Occupied(command_buffer, _) = element {
                let device = &devices[command_buffer.device_id.value];
                device.destroy_command_buffer(command_buffer);
            }
        }

        for element in self.samplers.data.write().map.drain(..) {
            if let Element::Occupied(sampler, _) = element {
                unsafe {
                    devices[sampler.device_id.value]
                        .raw
                        .destroy_sampler(sampler.raw);
                }
            }
        }

        for element in self.texture_views.data.write().map.drain(..) {
            if let Element::Occupied(texture_view, _) = element {
                let device = &devices[texture_view.device_id.value];
                unsafe {
                    device.raw.destroy_texture_view(texture_view.raw);
                }
            }
        }

        for element in self.textures.data.write().map.drain(..) {
            if let Element::Occupied(texture, _) = element {
                let device = &devices[texture.device_id.value];
                // Only textures that own a native hal object are destroyed
                // here; other `TextureInner` variants carry nothing to free.
                if let TextureInner::Native { raw: Some(raw) } = texture.inner {
                    unsafe {
                        device.raw.destroy_texture(raw);
                    }
                }
                // Clear-mode render-pass views are extra views owned by the
                // texture and must be destroyed alongside it.
                if let TextureClearMode::RenderPass { clear_views, .. } = texture.clear_mode {
                    for view in clear_views {
                        unsafe {
                            device.raw.destroy_texture_view(view);
                        }
                    }
                }
            }
        }
        for element in self.buffers.data.write().map.drain(..) {
            if let Element::Occupied(buffer, _) = element {
                //TODO: unmap if needed
                devices[buffer.device_id.value].destroy_buffer(buffer);
            }
        }
        for element in self.bind_groups.data.write().map.drain(..) {
            if let Element::Occupied(bind_group, _) = element {
                let device = &devices[bind_group.device_id.value];
                unsafe {
                    device.raw.destroy_bind_group(bind_group.raw);
                }
            }
        }

        for element in self.shader_modules.data.write().map.drain(..) {
            if let Element::Occupied(module, _) = element {
                let device = &devices[module.device_id.value];
                unsafe {
                    device.raw.destroy_shader_module(module.raw);
                }
            }
        }
        for element in self.bind_group_layouts.data.write().map.drain(..) {
            if let Element::Occupied(bgl, _) = element {
                let device = &devices[bgl.device_id.value];
                unsafe {
                    device.raw.destroy_bind_group_layout(bgl.raw);
                }
            }
        }
        for element in self.pipeline_layouts.data.write().map.drain(..) {
            if let Element::Occupied(pipeline_layout, _) = element {
                let device = &devices[pipeline_layout.device_id.value];
                unsafe {
                    device.raw.destroy_pipeline_layout(pipeline_layout.raw);
                }
            }
        }
        for element in self.compute_pipelines.data.write().map.drain(..) {
            if let Element::Occupied(pipeline, _) = element {
                let device = &devices[pipeline.device_id.value];
                unsafe {
                    device.raw.destroy_compute_pipeline(pipeline.raw);
                }
            }
        }
        for element in self.render_pipelines.data.write().map.drain(..) {
            if let Element::Occupied(pipeline, _) = element {
                let device = &devices[pipeline.device_id.value];
                unsafe {
                    device.raw.destroy_render_pipeline(pipeline.raw);
                }
            }
        }

        // Unconfigure any surface currently presented by a device of this
        // backend; surfaces of other backends are skipped.
        for element in surface_guard.map.iter_mut() {
            if let Element::Occupied(ref mut surface, _epoch) = *element {
                if surface
                    .presentation
                    .as_ref()
                    .map_or(wgt::Backend::Empty, |p| p.backend())
                    != A::VARIANT
                {
                    continue;
                }
                if let Some(present) = surface.presentation.take() {
                    let device = &devices[present.device_id.value];
                    let suf = A::get_surface_mut(surface);
                    unsafe {
                        suf.unwrap().raw.unconfigure(&device.raw);
                        //TODO: we could destroy the surface here
                    }
                }
            }
        }

        for element in self.query_sets.data.write().map.drain(..) {
            if let Element::Occupied(query_set, _) = element {
                let device = &devices[query_set.device_id.value];
                unsafe {
                    device.raw.destroy_query_set(query_set.raw);
                }
            }
        }

        // Devices go last: everything above needed them alive.
        for element in devices.map.drain(..) {
            if let Element::Occupied(device, _) = element {
                device.dispose();
            }
        }

        if with_adapters {
            // Release the (now empty) device registry lock before taking the
            // adapter lock.
            drop(devices);
            self.adapters.data.write().map.clear();
        }
    }

    /// Detach `surface` from the device identified by `device_id`,
    /// calling the hal surface's `unconfigure`.
    pub(crate) fn surface_unconfigure(
        &self,
        device_id: id::Valid<id::DeviceId>,
        surface: &mut HalSurface<A>,
    ) {
        use hal::Surface as _;

        let devices = self.devices.data.read();
        let device = &devices[device_id];
        unsafe {
            surface.raw.unconfigure(&device.raw);
        }
    }

    /// Snapshot the occupancy of every registry in this hub.
    ///
    /// NOTE(review): `staging_buffers` has no counterpart in [`HubReport`]
    /// and is therefore not reported — confirm whether that is intended.
    pub fn generate_report(&self) -> HubReport {
        HubReport {
            adapters: self.adapters.data.read().generate_report(),
            devices: self.devices.data.read().generate_report(),
            pipeline_layouts: self.pipeline_layouts.data.read().generate_report(),
            shader_modules: self.shader_modules.data.read().generate_report(),
            bind_group_layouts: self.bind_group_layouts.data.read().generate_report(),
            bind_groups: self.bind_groups.data.read().generate_report(),
            command_buffers: self.command_buffers.data.read().generate_report(),
            render_bundles: self.render_bundles.data.read().generate_report(),
            render_pipelines: self.render_pipelines.data.read().generate_report(),
            compute_pipelines: self.compute_pipelines.data.read().generate_report(),
            query_sets: self.query_sets.data.read().generate_report(),
            buffers: self.buffers.data.read().generate_report(),
            textures: self.textures.data.read().generate_report(),
            texture_views: self.texture_views.data.read().generate_report(),
            samplers: self.samplers.data.read().generate_report(),
        }
    }
}
|
||||
|
||||
/// One [`Hub`] per backend compiled into this build.
pub struct Hubs<F: GlobalIdentityHandlerFactory> {
    #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
    pub(crate) vulkan: Hub<hal::api::Vulkan, F>,
    #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
    pub(crate) metal: Hub<hal::api::Metal, F>,
    #[cfg(all(feature = "dx12", windows))]
    pub(crate) dx12: Hub<hal::api::Dx12, F>,
    #[cfg(all(feature = "dx11", windows))]
    pub(crate) dx11: Hub<hal::api::Dx11, F>,
    #[cfg(feature = "gles")]
    pub(crate) gl: Hub<hal::api::Gles, F>,
    // Present only when none of the real backends above is enabled for this
    // target, so the struct is never field-less.
    #[cfg(all(
        not(all(feature = "vulkan", not(target_arch = "wasm32"))),
        not(all(feature = "metal", any(target_os = "macos", target_os = "ios"))),
        not(all(feature = "dx12", windows)),
        not(all(feature = "dx11", windows)),
        not(feature = "gles"),
    ))]
    pub(crate) empty: Hub<hal::api::Empty, F>,
}
|
||||
|
||||
impl<F: GlobalIdentityHandlerFactory> Hubs<F> {
    /// Construct an empty [`Hub`] for each backend compiled in, using
    /// `factory` for identity handling. The `cfg` attributes must mirror
    /// the field definitions on [`Hubs`] exactly.
    pub(crate) fn new(factory: &F) -> Self {
        Self {
            #[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
            vulkan: Hub::new(factory),
            #[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
            metal: Hub::new(factory),
            #[cfg(all(feature = "dx12", windows))]
            dx12: Hub::new(factory),
            #[cfg(all(feature = "dx11", windows))]
            dx11: Hub::new(factory),
            #[cfg(feature = "gles")]
            gl: Hub::new(factory),
            #[cfg(all(
                not(all(feature = "vulkan", not(target_arch = "wasm32"))),
                not(all(feature = "metal", any(target_os = "macos", target_os = "ios"))),
                not(all(feature = "dx12", windows)),
                not(all(feature = "dx11", windows)),
                not(feature = "gles"),
            ))]
            empty: Hub::new(factory),
        }
    }
}
|
||||
273
third-party/vendor/wgpu-core/src/id.rs
vendored
Normal file
273
third-party/vendor/wgpu-core/src/id.rs
vendored
Normal file
|
|
@ -0,0 +1,273 @@
|
|||
use crate::{Epoch, Index};
|
||||
use std::{cmp::Ordering, fmt, marker::PhantomData};
|
||||
use wgt::Backend;
|
||||
|
||||
// With the `id32` feature, ids are packed into 32 bits; otherwise 64.
#[cfg(feature = "id32")]
type IdType = u32;
#[cfg(not(feature = "id32"))]
type IdType = u64;
// Non-zero integer of the same width. Using a `NonZero*` type gives
// `Option<Id<T>>` the same size as `Id<T>` (std's niche optimization).
#[cfg(feature = "id32")]
type NonZeroId = std::num::NonZeroU32;
#[cfg(not(feature = "id32"))]
type NonZeroId = std::num::NonZeroU64;
// Integer width used for the index half of a zipped id.
#[cfg(feature = "id32")]
type ZippedIndex = u16;
#[cfg(not(feature = "id32"))]
type ZippedIndex = Index;

// Bit layout of a zipped id, from low to high bits:
//   [ index : INDEX_BITS ] [ epoch : EPOCH_BITS ] [ backend : BACKEND_BITS ]
// i.e. the id is 2 * INDEX_BITS wide and the backend sits in the top 3 bits.
const INDEX_BITS: usize = std::mem::size_of::<ZippedIndex>() * 8;
const EPOCH_BITS: usize = INDEX_BITS - BACKEND_BITS;
const BACKEND_BITS: usize = 3;
const BACKEND_SHIFT: usize = INDEX_BITS * 2 - BACKEND_BITS;
pub const EPOCH_MASK: u32 = (1 << (EPOCH_BITS)) - 1;
// Dummy backend used to parameterize the public `*Id` type aliases below.
type Dummy = hal::api::Empty;
|
||||
|
||||
/// An identifier for a wgpu object.
|
||||
///
|
||||
/// An `Id<T>` value identifies a value stored in a [`Global`]'s [`Hub`]'s [`Storage`].
|
||||
/// `Storage` implements [`Index`] and [`IndexMut`], accepting `Id` values as indices.
|
||||
///
|
||||
/// ## Note on `Id` typing
|
||||
///
|
||||
/// You might assume that an `Id<T>` can only be used to retrieve a resource of
|
||||
/// type `T`, but that is not quite the case. The id types in `wgpu-core`'s
|
||||
/// public API ([`TextureId`], for example) can refer to resources belonging to
|
||||
/// any backend, but the corresponding resource types ([`Texture<A>`], for
|
||||
/// example) are always parameterized by a specific backend `A`.
|
||||
///
|
||||
/// So the `T` in `Id<T>` is usually a resource type like `Texture<Empty>`,
|
||||
/// where [`Empty`] is the `wgpu_hal` dummy back end. These empty types are
|
||||
/// never actually used, beyond just making sure you access each `Storage` with
|
||||
/// the right kind of identifier. The members of [`Hub<A>`] pair up each
|
||||
/// `X<Empty>` type with the resource type `X<A>`, for some specific backend
|
||||
/// `A`.
|
||||
///
|
||||
/// [`Global`]: crate::global::Global
|
||||
/// [`Hub`]: crate::hub::Hub
|
||||
/// [`Hub<A>`]: crate::hub::Hub
|
||||
/// [`Storage`]: crate::storage::Storage
|
||||
/// [`Texture<A>`]: crate::resource::Texture
|
||||
/// [`Index`]: std::ops::Index
|
||||
/// [`IndexMut`]: std::ops::IndexMut
|
||||
/// [`Registry`]: crate::hub::Registry
|
||||
/// [`Empty`]: hal::api::Empty
|
||||
#[repr(transparent)]
// When tracing, serialize through the human-readable `SerialId` proxy.
#[cfg_attr(feature = "trace", derive(serde::Serialize), serde(into = "SerialId"))]
// When replaying, deserialize from the same proxy.
#[cfg_attr(
    feature = "replay",
    derive(serde::Deserialize),
    serde(from = "SerialId")
)]
// Plain serde derives for users who want serialization without trace/replay
// (guarded so the derives are never emitted twice).
#[cfg_attr(
    all(feature = "serde", not(feature = "trace")),
    derive(serde::Serialize)
)]
#[cfg_attr(
    all(feature = "serde", not(feature = "replay")),
    derive(serde::Deserialize)
)]
pub struct Id<T>(NonZeroId, PhantomData<T>);
|
||||
|
||||
// This type represents Id in a more readable (and editable) way.
#[allow(dead_code)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
enum SerialId {
    // The only variant forces RON to not ignore "Id"
    Id(Index, Epoch, Backend),
}
// Trace direction: explode an id into its components for serialization.
#[cfg(feature = "trace")]
impl<T> From<Id<T>> for SerialId {
    fn from(id: Id<T>) -> Self {
        let (index, epoch, backend) = id.unzip();
        Self::Id(index, epoch, backend)
    }
}
// Replay direction: re-pack the components into a real id.
#[cfg(feature = "replay")]
impl<T> From<SerialId> for Id<T> {
    fn from(id: SerialId) -> Self {
        match id {
            SerialId::Id(index, epoch, backend) => TypedId::zip(index, epoch, backend),
        }
    }
}
|
||||
|
||||
impl<T> Id<T> {
|
||||
/// # Safety
|
||||
///
|
||||
/// The raw id must be valid for the type.
|
||||
pub unsafe fn from_raw(raw: NonZeroId) -> Self {
|
||||
Self(raw, PhantomData)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn dummy(index: u32) -> Valid<Self> {
|
||||
Valid(Id::zip(index, 1, Backend::Empty))
|
||||
}
|
||||
|
||||
pub fn backend(self) -> Backend {
|
||||
match self.0.get() >> (BACKEND_SHIFT) as u8 {
|
||||
0 => Backend::Empty,
|
||||
1 => Backend::Vulkan,
|
||||
2 => Backend::Metal,
|
||||
3 => Backend::Dx12,
|
||||
4 => Backend::Dx11,
|
||||
5 => Backend::Gl,
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The impls below are written by hand rather than derived: a derive would
// add a corresponding bound on `T` (e.g. `T: Clone` for `Clone`), but `T`
// is only a phantom marker — the id itself is always copyable, hashable and
// comparable regardless of `T`.
impl<T> Copy for Id<T> {}

impl<T> Clone for Id<T> {
    fn clone(&self) -> Self {
        Self(self.0, PhantomData)
    }
}

impl<T> fmt::Debug for Id<T> {
    // Debug-print the decoded (index, epoch, backend) triple rather than the
    // opaque packed integer.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        self.unzip().fmt(formatter)
    }
}

impl<T> std::hash::Hash for Id<T> {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.0.hash(state);
    }
}

// Equality and ordering compare the raw packed integers, so they order by
// backend, then epoch, then index (high bits first).
impl<T> PartialEq for Id<T> {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}

impl<T> Eq for Id<T> {}

impl<T> PartialOrd for Id<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.0.partial_cmp(&other.0)
    }
}

impl<T> Ord for Id<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.cmp(&other.0)
    }
}
|
||||
|
||||
/// An internal ID that has been checked to point to
/// a valid object in the storages.
///
/// Newtype wrapper: holding a `Valid<I>` is the crate-internal evidence that
/// the lookup for `I` already succeeded.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub(crate) struct Valid<I>(pub I);
|
||||
|
||||
/// Trait carrying methods for direct `Id` access.
///
/// Most `wgpu-core` clients should not use this trait. Unusual clients that
/// need to construct `Id` values directly, or access their components, like the
/// WGPU recording player, may use this trait to do so.
pub trait TypedId: Copy {
    /// Pack an (index, epoch, backend) triple into a single id.
    fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self;
    /// Recover the (index, epoch, backend) triple that was packed by `zip`.
    fn unzip(self) -> (Index, Epoch, Backend);
    /// Expose the raw non-zero integer behind the id.
    fn into_raw(self) -> NonZeroId;
}
|
||||
|
||||
#[allow(trivial_numeric_casts)]
impl<T> TypedId for Id<T> {
    fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self {
        // Reject components that would not survive the round trip through
        // the packed representation.
        assert_eq!(0, epoch >> EPOCH_BITS);
        assert_eq!(0, (index as IdType) >> INDEX_BITS);
        // Pack: index in the low bits, epoch above it, backend at the top.
        let v = index as IdType
            | ((epoch as IdType) << INDEX_BITS)
            | ((backend as IdType) << BACKEND_SHIFT);
        // `NonZeroId::new` only fails (panicking here) when index, epoch and
        // backend are all zero.
        Id(NonZeroId::new(v).unwrap(), PhantomData)
    }

    fn unzip(self) -> (Index, Epoch, Backend) {
        (
            // Truncating to `ZippedIndex` keeps just the low INDEX_BITS.
            (self.0.get() as ZippedIndex) as Index,
            // Shift the epoch down, then mask off the backend bits above it.
            (((self.0.get() >> INDEX_BITS) as ZippedIndex) & (EPOCH_MASK as ZippedIndex)) as Index,
            self.backend(),
        )
    }

    fn into_raw(self) -> NonZeroId {
        self.0
    }
}
|
||||
|
||||
// Public id aliases. Each is `Id<X<Dummy>>`, where `Dummy` is the empty
// backend: the marker type only keys the right `Storage`, it does not pin
// the id to a particular backend (see the `Id` docs above).
pub type AdapterId = Id<crate::instance::Adapter<Dummy>>;
pub type SurfaceId = Id<crate::instance::Surface>;
// Device
pub type DeviceId = Id<crate::device::Device<Dummy>>;
pub type QueueId = DeviceId;
// Resource
pub type BufferId = Id<crate::resource::Buffer<Dummy>>;
pub type StagingBufferId = Id<crate::resource::StagingBuffer<Dummy>>;
pub type TextureViewId = Id<crate::resource::TextureView<Dummy>>;
pub type TextureId = Id<crate::resource::Texture<Dummy>>;
pub type SamplerId = Id<crate::resource::Sampler<Dummy>>;
// Binding model
pub type BindGroupLayoutId = Id<crate::binding_model::BindGroupLayout<Dummy>>;
pub type PipelineLayoutId = Id<crate::binding_model::PipelineLayout<Dummy>>;
pub type BindGroupId = Id<crate::binding_model::BindGroup<Dummy>>;
// Pipeline
pub type ShaderModuleId = Id<crate::pipeline::ShaderModule<Dummy>>;
pub type RenderPipelineId = Id<crate::pipeline::RenderPipeline<Dummy>>;
pub type ComputePipelineId = Id<crate::pipeline::ComputePipeline<Dummy>>;
// Command
pub type CommandEncoderId = CommandBufferId;
pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
// NOTE(review): the pass/bundle *encoder* ids are raw pointers, not zipped
// ids — presumably because passes are recorded client-side and never live in
// a `Storage`; confirm against the command recording code.
pub type RenderPassEncoderId = *mut crate::command::RenderPass;
pub type ComputePassEncoderId = *mut crate::command::ComputePass;
pub type RenderBundleEncoderId = *mut crate::command::RenderBundleEncoder;
pub type RenderBundleId = Id<crate::command::RenderBundle<Dummy>>;
pub type QuerySetId = Id<crate::resource::QuerySet<Dummy>>;
||||
|
||||
#[test]
fn test_id_backend() {
    // Round-trip every backend variant through zip/unzip and through the
    // dedicated `backend()` accessor.
    let all_backends = [
        Backend::Empty,
        Backend::Vulkan,
        Backend::Metal,
        Backend::Dx12,
        Backend::Dx11,
        Backend::Gl,
    ];
    for &backend in &all_backends {
        let id: Id<()> = Id::zip(1, 0, backend);
        let (_index, _epoch, unzipped) = id.unzip();
        assert_eq!(id.backend(), backend);
        assert_eq!(unzipped, backend);
    }
}
|
||||
|
||||
#[test]
fn test_id() {
    // Exercise zip/unzip across the extremes and midpoints of the index and
    // epoch ranges, combined with every backend.
    let last_index = ((1u64 << INDEX_BITS) - 1) as Index;
    let index_cases = [1, last_index / 2 - 1, last_index / 2 + 1, last_index];
    let epoch_cases = [1, EPOCH_MASK / 2 - 1, EPOCH_MASK / 2 + 1, EPOCH_MASK];
    let backend_cases = [
        Backend::Empty,
        Backend::Vulkan,
        Backend::Metal,
        Backend::Dx12,
        Backend::Dx11,
        Backend::Gl,
    ];
    for &index in &index_cases {
        for &epoch in &epoch_cases {
            for &backend in &backend_cases {
                let id: Id<()> = Id::zip(index, epoch, backend);
                // Every component must survive the round trip unchanged.
                assert_eq!((index, epoch, backend), id.unzip());
            }
        }
    }
}
|
||||
183
third-party/vendor/wgpu-core/src/identity.rs
vendored
Normal file
183
third-party/vendor/wgpu-core/src/identity.rs
vendored
Normal file
|
|
@ -0,0 +1,183 @@
|
|||
use parking_lot::Mutex;
|
||||
use wgt::Backend;
|
||||
|
||||
use crate::{id, Epoch, Index};
|
||||
use std::fmt::Debug;
|
||||
|
||||
/// A simple structure to allocate [`Id`] identifiers.
|
||||
///
|
||||
/// Calling [`alloc`] returns a fresh, never-before-seen id. Calling [`free`]
|
||||
/// marks an id as dead; it will never be returned again by `alloc`.
|
||||
///
|
||||
/// Use `IdentityManager::default` to construct new instances.
|
||||
///
|
||||
/// `IdentityManager` returns `Id`s whose index values are suitable for use as
|
||||
/// indices into a `Storage<T>` that holds those ids' referents:
|
||||
///
|
||||
/// - Every live id has a distinct index value. Each live id's index selects a
|
||||
/// distinct element in the vector.
|
||||
///
|
||||
/// - `IdentityManager` prefers low index numbers. If you size your vector to
|
||||
/// accommodate the indices produced here, the vector's length will reflect
|
||||
/// the highwater mark of actual occupancy.
|
||||
///
|
||||
/// - `IdentityManager` reuses the index values of freed ids before returning
|
||||
/// ids with new index values. Freed vector entries get reused.
|
||||
///
|
||||
/// See the module-level documentation for an overview of how this
|
||||
/// fits together.
|
||||
///
|
||||
/// [`Id`]: crate::id::Id
|
||||
/// [`Backend`]: wgt::Backend;
|
||||
/// [`alloc`]: IdentityManager::alloc
|
||||
/// [`free`]: IdentityManager::free
|
||||
#[derive(Debug, Default)]
pub struct IdentityManager {
    /// Available index values. If empty, then `epochs.len()` is the next index
    /// to allocate.
    ///
    /// Used as a stack: the most recently freed index is reused first.
    free: Vec<Index>,

    /// The next or currently-live epoch value associated with each `Id` index.
    ///
    /// If there is a live id with index `i`, then `epochs[i]` is its epoch; any
    /// id with the same index but an older epoch is dead.
    ///
    /// If index `i` is currently unused, `epochs[i]` is the epoch to use in its
    /// next `Id`.
    epochs: Vec<Epoch>,
}
|
||||
|
||||
impl IdentityManager {
|
||||
/// Allocate a fresh, never-before-seen id with the given `backend`.
|
||||
///
|
||||
/// The backend is incorporated into the id, so that ids allocated with
|
||||
/// different `backend` values are always distinct.
|
||||
pub fn alloc<I: id::TypedId>(&mut self, backend: Backend) -> I {
|
||||
match self.free.pop() {
|
||||
Some(index) => I::zip(index, self.epochs[index as usize], backend),
|
||||
None => {
|
||||
let epoch = 1;
|
||||
let id = I::zip(self.epochs.len() as Index, epoch, backend);
|
||||
self.epochs.push(epoch);
|
||||
id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Free `id`. It will never be returned from `alloc` again.
|
||||
pub fn free<I: id::TypedId + Debug>(&mut self, id: I) {
|
||||
let (index, epoch, _backend) = id.unzip();
|
||||
let pe = &mut self.epochs[index as usize];
|
||||
assert_eq!(*pe, epoch);
|
||||
// If the epoch reaches EOL, the index doesn't go
|
||||
// into the free list, will never be reused again.
|
||||
if epoch < id::EPOCH_MASK {
|
||||
*pe = epoch + 1;
|
||||
self.free.push(index);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A type that can build true ids from proto-ids, and free true ids.
///
/// For some implementations, the true id is based on the proto-id.
/// The caller is responsible for providing well-allocated proto-ids.
///
/// For other implementations, the proto-id carries no information
/// (it's `()`, say), and this `IdentityHandler` type takes care of
/// allocating a fresh true id.
///
/// See the module-level documentation for details.
pub trait IdentityHandler<I>: Debug {
    /// The type of proto-id consumed by this filter, to produce a true id.
    type Input: Clone + Debug;

    /// Given a proto-id value `id`, return a true id for `backend`.
    fn process(&self, id: Self::Input, backend: Backend) -> I;

    /// Free the true id `id`.
    fn free(&self, id: I);
}

/// The `IdentityManager`-backed handler: the proto-id is `()`, and a fresh
/// true id is allocated under the mutex on every `process` call.
impl<I: id::TypedId + Debug> IdentityHandler<I> for Mutex<IdentityManager> {
    type Input = ();
    fn process(&self, _id: Self::Input, backend: Backend) -> I {
        self.lock().alloc(backend)
    }
    fn free(&self, id: I) {
        self.lock().free(id)
    }
}
|
||||
|
||||
/// A type that can produce [`IdentityHandler`] filters for ids of type `I`.
///
/// See the module-level documentation for details.
pub trait IdentityHandlerFactory<I> {
    /// The type of filter this factory constructs.
    ///
    /// "Filter" and "handler" seem to both mean the same thing here:
    /// something that can produce true ids from proto-ids.
    type Filter: IdentityHandler<I>;

    /// Create an [`IdentityHandler<I>`] implementation that can
    /// transform proto-ids into ids of type `I`.
    ///
    /// [`IdentityHandler<I>`]: IdentityHandler
    fn spawn(&self) -> Self::Filter;
}
|
||||
|
||||
/// A global identity handler factory based on [`IdentityManager`].
///
/// Each of this type's `IdentityHandlerFactory<I>::spawn` methods
/// returns a `Mutex<IdentityManager<I>>`, which allocates fresh `I`
/// ids itself, and takes `()` as its proto-id type.
#[derive(Debug)]
pub struct IdentityManagerFactory;

impl<I: id::TypedId + Debug> IdentityHandlerFactory<I> for IdentityManagerFactory {
    type Filter = Mutex<IdentityManager>;
    // Every spawned filter gets its own independent manager, and therefore
    // its own index/epoch space.
    fn spawn(&self) -> Self::Filter {
        Mutex::new(IdentityManager::default())
    }
}
|
||||
|
||||
/// A factory that can build [`IdentityHandler`]s for all resource
/// types.
///
/// Pure trait-alias pattern: the (empty) trait simply bundles one
/// `IdentityHandlerFactory<…>` bound per id type in the crate.
pub trait GlobalIdentityHandlerFactory:
    IdentityHandlerFactory<id::AdapterId>
    + IdentityHandlerFactory<id::DeviceId>
    + IdentityHandlerFactory<id::PipelineLayoutId>
    + IdentityHandlerFactory<id::ShaderModuleId>
    + IdentityHandlerFactory<id::BindGroupLayoutId>
    + IdentityHandlerFactory<id::BindGroupId>
    + IdentityHandlerFactory<id::CommandBufferId>
    + IdentityHandlerFactory<id::RenderBundleId>
    + IdentityHandlerFactory<id::RenderPipelineId>
    + IdentityHandlerFactory<id::ComputePipelineId>
    + IdentityHandlerFactory<id::QuerySetId>
    + IdentityHandlerFactory<id::BufferId>
    + IdentityHandlerFactory<id::StagingBufferId>
    + IdentityHandlerFactory<id::TextureId>
    + IdentityHandlerFactory<id::TextureViewId>
    + IdentityHandlerFactory<id::SamplerId>
    + IdentityHandlerFactory<id::SurfaceId>
{
}

impl GlobalIdentityHandlerFactory for IdentityManagerFactory {}

/// Shorthand for the proto-id (input) type that factory `G` consumes when
/// producing true ids of type `I`.
pub type Input<G, I> = <<G as IdentityHandlerFactory<I>>::Filter as IdentityHandler<I>>::Input;
|
||||
|
||||
#[test]
fn test_epoch_end_of_life() {
    use id::TypedId as _;
    let mut man = IdentityManager::default();
    // Pretend index 0 has already reached the final epoch value.
    man.epochs.push(id::EPOCH_MASK);
    man.free.push(0);

    let first = man.alloc::<id::BufferId>(Backend::Empty);
    assert_eq!(first.unzip().0, 0);
    man.free(first);

    // Freeing at EPOCH_MASK retires index 0, so a fresh index is handed out.
    let second = man.alloc::<id::BufferId>(Backend::Empty);
    // confirm that the index 0 is no longer re-used
    assert_eq!(second.unzip().0, 1);
}
|
||||
35
third-party/vendor/wgpu-core/src/init_tracker/buffer.rs
vendored
Normal file
35
third-party/vendor/wgpu-core/src/init_tracker/buffer.rs
vendored
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
use super::{InitTracker, MemoryInitKind};
|
||||
use crate::id::BufferId;
|
||||
use std::ops::Range;
|
||||
|
||||
/// A pending buffer init action: byte `range` of buffer `id` is about to be
/// used in the way described by `kind`.
#[derive(Debug, Clone)]
pub(crate) struct BufferInitTrackerAction {
    pub id: BufferId,
    pub range: Range<wgt::BufferAddress>,
    pub kind: MemoryInitKind,
}

/// Byte-granularity init tracker for a single buffer.
pub(crate) type BufferInitTracker = InitTracker<wgt::BufferAddress>;
|
||||
|
||||
impl BufferInitTracker {
|
||||
/// Checks if an action has/requires any effect on the initialization status
|
||||
/// and shrinks its range if possible.
|
||||
pub(crate) fn check_action(
|
||||
&self,
|
||||
action: &BufferInitTrackerAction,
|
||||
) -> Option<BufferInitTrackerAction> {
|
||||
self.create_action(action.id, action.range.clone(), action.kind)
|
||||
}
|
||||
|
||||
/// Creates an action if it would have any effect on the initialization
|
||||
/// status and shrinks the range if possible.
|
||||
pub(crate) fn create_action(
|
||||
&self,
|
||||
id: BufferId,
|
||||
query_range: Range<wgt::BufferAddress>,
|
||||
kind: MemoryInitKind,
|
||||
) -> Option<BufferInitTrackerAction> {
|
||||
self.check(query_range)
|
||||
.map(|range| BufferInitTrackerAction { id, range, kind })
|
||||
}
|
||||
}
|
||||
380
third-party/vendor/wgpu-core/src/init_tracker/mod.rs
vendored
Normal file
380
third-party/vendor/wgpu-core/src/init_tracker/mod.rs
vendored
Normal file
|
|
@ -0,0 +1,380 @@
|
|||
/*! Lazy initialization of texture and buffer memory.
|
||||
|
||||
The WebGPU specification requires all texture & buffer memory to be
|
||||
zero initialized on first read. To avoid unnecessary inits, we track
|
||||
the initialization status of every resource and perform inits lazily.
|
||||
|
||||
The granularity is different for buffers and textures:
|
||||
|
||||
- Buffer: Byte granularity to support usecases with large, partially
|
||||
bound buffers well.
|
||||
|
||||
- Texture: Mip-level per layer. That is, a 2D surface is either
|
||||
completely initialized or not, subrects are not tracked.
|
||||
|
||||
Every use of a buffer/texture generates a InitTrackerAction which are
|
||||
recorded and later resolved at queue submit by merging them with the
|
||||
current state and each other in execution order.
|
||||
|
||||
It is important to note that from the point of view of the memory init
|
||||
system there are two kind of writes:
|
||||
|
||||
- **Full writes**: Any kind of memcpy operation. These cause a
|
||||
`MemoryInitKind.ImplicitlyInitialized` action.
|
||||
|
||||
- **(Potentially) partial writes**: For example, write use in a
Shader. The system is not able to determine if a resource is fully
initialized afterwards but is no longer allowed to perform any
clears, therefore this leads to a
`MemoryInitKind.NeedsInitializedMemory` action, exactly like a read
|
||||
|
||||
*/
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use std::{fmt, iter, ops::Range};
|
||||
|
||||
mod buffer;
|
||||
mod texture;
|
||||
|
||||
pub(crate) use buffer::{BufferInitTracker, BufferInitTrackerAction};
|
||||
pub(crate) use texture::{
|
||||
has_copy_partial_init_tracker_coverage, TextureInitRange, TextureInitTracker,
|
||||
TextureInitTrackerAction,
|
||||
};
|
||||
|
||||
/// How a pending action affects a resource's initialization status.
#[derive(Debug, Clone, Copy)]
pub(crate) enum MemoryInitKind {
    // The memory range is going to be written by an already initialized source,
    // thus doesn't need extra attention other than marking as initialized.
    ImplicitlyInitialized,
    // The memory range is going to be read, therefore needs to ensure prior
    // initialization.
    NeedsInitializedMemory,
}
|
||||
|
||||
// Most of the time a resource is either fully uninitialized (one element) or
// initialized (zero elements), so a one-element inline SmallVec avoids heap
// allocation in the common case.
type UninitializedRangeVec<Idx> = SmallVec<[Range<Idx>; 1]>;

/// Tracks initialization status of a linear range from 0..size
#[derive(Debug, Clone)]
pub(crate) struct InitTracker<Idx: Ord + Copy + Default> {
    // Ordered, non overlapping list of all uninitialized ranges.
    // `check`/`drain` binary-search this list, so the ordering invariant
    // must be upheld by every mutation.
    uninitialized_ranges: UninitializedRangeVec<Idx>,
}
|
||||
|
||||
/// Iterator returned by `InitTracker::drain`: yields each uninitialized
/// sub-range overlapping `drain_range` (clipped to it), then fixes up the
/// underlying range list once iteration runs past the last overlap.
pub(crate) struct InitTrackerDrain<'a, Idx: fmt::Debug + Ord + Copy> {
    uninitialized_ranges: &'a mut UninitializedRangeVec<Idx>,
    drain_range: Range<Idx>,
    // Index of the first range that may overlap `drain_range`.
    first_index: usize,
    // Next range index to visit; ranges in first_index..next_index have
    // already been yielded.
    next_index: usize,
}
|
||||
|
||||
impl<'a, Idx> Iterator for InitTrackerDrain<'a, Idx>
where
    Idx: fmt::Debug + Ord + Copy,
{
    type Item = Range<Idx>;

    fn next(&mut self) -> Option<Self::Item> {
        // Yield phase: while the current range still starts inside
        // `drain_range`, clip it to `drain_range` and hand it out.
        if let Some(r) = self
            .uninitialized_ranges
            .get(self.next_index)
            .and_then(|range| {
                if range.start < self.drain_range.end {
                    Some(range.clone())
                } else {
                    None
                }
            })
        {
            self.next_index += 1;
            Some(r.start.max(self.drain_range.start)..r.end.min(self.drain_range.end))
        } else {
            // Finalize phase (runs once, after the last yield): mutate the
            // range list so every span yielded above is now marked
            // initialized.
            let num_affected = self.next_index - self.first_index;
            if num_affected == 0 {
                return None;
            }
            let first_range = &mut self.uninitialized_ranges[self.first_index];

            // Split one "big" uninitialized range?
            if num_affected == 1
                && first_range.start < self.drain_range.start
                && first_range.end > self.drain_range.end
            {
                // The drained span is strictly inside a single range: keep
                // the tail in place and insert the head before it.
                let old_start = first_range.start;
                first_range.start = self.drain_range.end;
                self.uninitialized_ranges
                    .insert(self.first_index, old_start..self.drain_range.start);
            }
            // Adjust border ranges and delete everything in-between.
            else {
                // Trim the first overlapping range, or mark it for removal
                // if it is fully covered.
                let remove_start = if first_range.start >= self.drain_range.start {
                    self.first_index
                } else {
                    first_range.end = self.drain_range.start;
                    self.first_index + 1
                };

                // Same for the last overlapping range.
                let last_range = &mut self.uninitialized_ranges[self.next_index - 1];
                let remove_end = if last_range.end <= self.drain_range.end {
                    self.next_index
                } else {
                    last_range.start = self.drain_range.end;
                    self.next_index - 1
                };

                // Drop every range fully contained in the drained span.
                self.uninitialized_ranges.drain(remove_start..remove_end);
            }

            None
        }
    }
}
|
||||
|
||||
impl<'a, Idx> Drop for InitTrackerDrain<'a, Idx>
where
    Idx: fmt::Debug + Ord + Copy,
{
    fn drop(&mut self) {
        // If the drain was never iterated at all, run it to completion now
        // so the tracked ranges still get updated (the fix-up lives in the
        // iterator's finalize branch).
        //
        // NOTE(review): a *partially* consumed drain (next_index >
        // first_index but not run to exhaustion) is left unfinalized here —
        // confirm that callers always either fully consume the iterator or
        // never touch it.
        if self.next_index <= self.first_index {
            for _ in self {}
        }
    }
}
|
||||
|
||||
impl<Idx> InitTracker<Idx>
where
    Idx: fmt::Debug + Ord + Copy + Default,
{
    /// Create a tracker for a resource of the given `size`, starting out
    /// fully uninitialized (a single `0..size` range).
    pub(crate) fn new(size: Idx) -> Self {
        Self {
            uninitialized_ranges: iter::once(Idx::default()..size).collect(),
        }
    }

    // Checks if there's any uninitialized ranges within a query.
    //
    // If there are any, the returned range is a subrange of the query_range
    // that contains all these uninitialized regions. Returned range may be
    // larger than necessary (tradeoff for making this function O(log n))
    pub(crate) fn check(&self, query_range: Range<Idx>) -> Option<Range<Idx>> {
        // Binary search for the first range that ends after the query starts.
        let index = self
            .uninitialized_ranges
            .partition_point(|r| r.end <= query_range.start);
        self.uninitialized_ranges
            .get(index)
            .and_then(|start_range| {
                if start_range.start < query_range.end {
                    let start = start_range.start.max(query_range.start);
                    match self.uninitialized_ranges.get(index + 1) {
                        Some(next_range) => {
                            if next_range.start < query_range.end {
                                // Would need to keep iterating for more
                                // accurate upper bound. Don't do that here.
                                Some(start..query_range.end)
                            } else {
                                Some(start..start_range.end.min(query_range.end))
                            }
                        }
                        None => Some(start..start_range.end.min(query_range.end)),
                    }
                } else {
                    // First candidate starts past the query: nothing
                    // uninitialized inside it.
                    None
                }
            })
    }

    // Drains uninitialized ranges in a query range.
    pub(crate) fn drain(&mut self, drain_range: Range<Idx>) -> InitTrackerDrain<Idx> {
        // Same binary search as in `check`; the iterator does the real work
        // lazily (see `InitTrackerDrain`).
        let index = self
            .uninitialized_ranges
            .partition_point(|r| r.end <= drain_range.start);
        InitTrackerDrain {
            drain_range,
            uninitialized_ranges: &mut self.uninitialized_ranges,
            first_index: index,
            next_index: index,
        }
    }
}
|
||||
|
||||
impl InitTracker<u32> {
|
||||
// Makes a single entry uninitialized if not already uninitialized
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn discard(&mut self, pos: u32) {
|
||||
// first range where end>=idx
|
||||
let r_idx = self.uninitialized_ranges.partition_point(|r| r.end < pos);
|
||||
if let Some(r) = self.uninitialized_ranges.get(r_idx) {
|
||||
// Extend range at end
|
||||
if r.end == pos {
|
||||
// merge with next?
|
||||
if let Some(right) = self.uninitialized_ranges.get(r_idx + 1) {
|
||||
if right.start == pos + 1 {
|
||||
self.uninitialized_ranges[r_idx] = r.start..right.end;
|
||||
self.uninitialized_ranges.remove(r_idx + 1);
|
||||
return;
|
||||
}
|
||||
}
|
||||
self.uninitialized_ranges[r_idx] = r.start..(pos + 1);
|
||||
} else if r.start > pos {
|
||||
// may still extend range at beginning
|
||||
if r.start == pos + 1 {
|
||||
self.uninitialized_ranges[r_idx] = pos..r.end;
|
||||
} else {
|
||||
// previous range end must be smaller than idx, therefore no merge possible
|
||||
self.uninitialized_ranges.push(pos..(pos + 1));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
self.uninitialized_ranges.push(pos..(pos + 1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Unit tests for `InitTracker` over `u32` indices.
#[cfg(test)]
mod test {
    use std::ops::Range;

    type Tracker = super::InitTracker<u32>;

    #[test]
    fn check_for_newly_created_tracker() {
        // A fresh tracker reports every queried span as uninitialized.
        let tracker = Tracker::new(10);
        assert_eq!(tracker.check(0..10), Some(0..10));
        assert_eq!(tracker.check(0..3), Some(0..3));
        assert_eq!(tracker.check(3..4), Some(3..4));
        assert_eq!(tracker.check(4..10), Some(4..10));
    }

    #[test]
    fn check_for_drained_tracker() {
        // Draining the full range marks everything initialized.
        let mut tracker = Tracker::new(10);
        tracker.drain(0..10);
        assert_eq!(tracker.check(0..10), None);
        assert_eq!(tracker.check(0..3), None);
        assert_eq!(tracker.check(3..4), None);
        assert_eq!(tracker.check(4..10), None);
    }

    #[test]
    fn check_for_partially_filled_tracker() {
        let mut tracker = Tracker::new(25);
        // Two regions of uninitialized memory
        tracker.drain(0..5);
        tracker.drain(10..15);
        tracker.drain(20..25);

        assert_eq!(tracker.check(0..25), Some(5..25)); // entire range

        assert_eq!(tracker.check(0..5), None); // left non-overlapping
        assert_eq!(tracker.check(3..8), Some(5..8)); // left overlapping region
        assert_eq!(tracker.check(3..17), Some(5..17)); // left overlapping region + contained region

        // right overlapping region + contained region (yes, doesn't fix range end!)
        assert_eq!(tracker.check(8..22), Some(8..22));
        // right overlapping region
        assert_eq!(tracker.check(17..22), Some(17..20));
        // right non-overlapping
        assert_eq!(tracker.check(20..25), None);
    }

    #[test]
    fn drain_already_drained() {
        // Re-draining initialized memory must be a harmless no-op.
        let mut tracker = Tracker::new(30);
        tracker.drain(10..20);

        // Overlapping with non-cleared
        tracker.drain(5..15); // Left overlap
        tracker.drain(15..25); // Right overlap
        tracker.drain(0..30); // Inner overlap

        // Clear fully cleared
        tracker.drain(0..30);

        assert_eq!(tracker.check(0..30), None);
    }

    #[test]
    fn drain_never_returns_ranges_twice_for_same_range() {
        let mut tracker = Tracker::new(19);
        assert_eq!(tracker.drain(0..19).count(), 1);
        assert_eq!(tracker.drain(0..19).count(), 0);

        let mut tracker = Tracker::new(17);
        assert_eq!(tracker.drain(5..8).count(), 1);
        assert_eq!(tracker.drain(5..8).count(), 0);
        assert_eq!(tracker.drain(1..3).count(), 1);
        assert_eq!(tracker.drain(1..3).count(), 0);
        assert_eq!(tracker.drain(7..13).count(), 1);
        assert_eq!(tracker.drain(7..13).count(), 0);
    }

    #[test]
    fn drain_splits_ranges_correctly() {
        let mut tracker = Tracker::new(1337);
        assert_eq!(
            tracker.drain(21..42).collect::<Vec<Range<u32>>>(),
            vec![21..42]
        );
        assert_eq!(
            tracker.drain(900..1000).collect::<Vec<Range<u32>>>(),
            vec![900..1000]
        );

        // Splitted ranges.
        assert_eq!(
            tracker.drain(5..1003).collect::<Vec<Range<u32>>>(),
            vec![5..21, 42..900, 1000..1003]
        );
        assert_eq!(
            tracker.drain(0..1337).collect::<Vec<Range<u32>>>(),
            vec![0..5, 1003..1337]
        );
    }

    #[test]
    fn discard_adds_range_on_cleared() {
        // Discarding single positions of initialized memory re-marks them
        // as uninitialized, one element at a time.
        let mut tracker = Tracker::new(10);
        tracker.drain(0..10);
        tracker.discard(0);
        tracker.discard(5);
        tracker.discard(9);
        assert_eq!(tracker.check(0..1), Some(0..1));
        assert_eq!(tracker.check(1..5), None);
        assert_eq!(tracker.check(5..6), Some(5..6));
        assert_eq!(tracker.check(6..9), None);
        assert_eq!(tracker.check(9..10), Some(9..10));
    }

    #[test]
    fn discard_does_nothing_on_uncleared() {
        // Discarding inside an already-uninitialized span leaves the single
        // 0..10 range untouched.
        let mut tracker = Tracker::new(10);
        tracker.discard(0);
        tracker.discard(5);
        tracker.discard(9);
        assert_eq!(tracker.uninitialized_ranges.len(), 1);
        assert_eq!(tracker.uninitialized_ranges[0], 0..10);
    }

    #[test]
    fn discard_extends_ranges() {
        // Discarding directly adjacent to a range grows it rather than
        // creating a new one.
        let mut tracker = Tracker::new(10);
        tracker.drain(3..7);
        tracker.discard(2);
        tracker.discard(7);
        assert_eq!(tracker.uninitialized_ranges.len(), 2);
        assert_eq!(tracker.uninitialized_ranges[0], 0..3);
        assert_eq!(tracker.uninitialized_ranges[1], 7..10);
    }

    #[test]
    fn discard_merges_ranges() {
        // Discarding the single gap between two ranges fuses them.
        let mut tracker = Tracker::new(10);
        tracker.drain(3..4);
        tracker.discard(3);
        assert_eq!(tracker.uninitialized_ranges.len(), 1);
        assert_eq!(tracker.uninitialized_ranges[0], 0..10);
    }
}
|
||||
103
third-party/vendor/wgpu-core/src/init_tracker/texture.rs
vendored
Normal file
103
third-party/vendor/wgpu-core/src/init_tracker/texture.rs
vendored
Normal file
|
|
@ -0,0 +1,103 @@
|
|||
use super::{InitTracker, MemoryInitKind};
|
||||
use crate::{id::TextureId, track::TextureSelector};
|
||||
use arrayvec::ArrayVec;
|
||||
use std::ops::Range;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct TextureInitRange {
|
||||
pub(crate) mip_range: Range<u32>,
|
||||
// Strictly array layers. We do *not* track volume slices separately.
|
||||
pub(crate) layer_range: Range<u32>,
|
||||
}
|
||||
|
||||
// Returns true if a copy operation doesn't fully cover the texture init
|
||||
// tracking granularity. I.e. if this function returns true for a pending copy
|
||||
// operation, the target texture needs to be ensured to be initialized first!
|
||||
pub(crate) fn has_copy_partial_init_tracker_coverage(
|
||||
copy_size: &wgt::Extent3d,
|
||||
mip_level: u32,
|
||||
desc: &wgt::TextureDescriptor<(), Vec<wgt::TextureFormat>>,
|
||||
) -> bool {
|
||||
let target_size = desc.mip_level_size(mip_level).unwrap();
|
||||
copy_size.width != target_size.width
|
||||
|| copy_size.height != target_size.height
|
||||
|| (desc.dimension == wgt::TextureDimension::D3
|
||||
&& copy_size.depth_or_array_layers != target_size.depth_or_array_layers)
|
||||
}
|
||||
|
||||
impl From<TextureSelector> for TextureInitRange {
|
||||
fn from(selector: TextureSelector) -> Self {
|
||||
TextureInitRange {
|
||||
mip_range: selector.mips,
|
||||
layer_range: selector.layers,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct TextureInitTrackerAction {
|
||||
pub(crate) id: TextureId,
|
||||
pub(crate) range: TextureInitRange,
|
||||
pub(crate) kind: MemoryInitKind,
|
||||
}
|
||||
|
||||
pub(crate) type TextureLayerInitTracker = InitTracker<u32>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct TextureInitTracker {
|
||||
pub mips: ArrayVec<TextureLayerInitTracker, { hal::MAX_MIP_LEVELS as usize }>,
|
||||
}
|
||||
|
||||
impl TextureInitTracker {
|
||||
pub(crate) fn new(mip_level_count: u32, depth_or_array_layers: u32) -> Self {
|
||||
TextureInitTracker {
|
||||
mips: std::iter::repeat(TextureLayerInitTracker::new(depth_or_array_layers))
|
||||
.take(mip_level_count as usize)
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn check_action(
|
||||
&self,
|
||||
action: &TextureInitTrackerAction,
|
||||
) -> Option<TextureInitTrackerAction> {
|
||||
let mut mip_range_start = std::usize::MAX;
|
||||
let mut mip_range_end = std::usize::MIN;
|
||||
let mut layer_range_start = std::u32::MAX;
|
||||
let mut layer_range_end = std::u32::MIN;
|
||||
|
||||
for (i, mip_tracker) in self
|
||||
.mips
|
||||
.iter()
|
||||
.enumerate()
|
||||
.take(action.range.mip_range.end as usize)
|
||||
.skip(action.range.mip_range.start as usize)
|
||||
{
|
||||
if let Some(uninitialized_layer_range) =
|
||||
mip_tracker.check(action.range.layer_range.clone())
|
||||
{
|
||||
mip_range_start = mip_range_start.min(i);
|
||||
mip_range_end = i + 1;
|
||||
layer_range_start = layer_range_start.min(uninitialized_layer_range.start);
|
||||
layer_range_end = layer_range_end.max(uninitialized_layer_range.end);
|
||||
};
|
||||
}
|
||||
|
||||
if mip_range_start < mip_range_end && layer_range_start < layer_range_end {
|
||||
Some(TextureInitTrackerAction {
|
||||
id: action.id,
|
||||
range: TextureInitRange {
|
||||
mip_range: mip_range_start as u32..mip_range_end as u32,
|
||||
layer_range: layer_range_start..layer_range_end,
|
||||
},
|
||||
kind: action.kind,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn discard(&mut self, mip_level: u32, layer: u32) {
|
||||
self.mips[mip_level as usize].discard(layer);
|
||||
}
|
||||
}
|
||||
1168
third-party/vendor/wgpu-core/src/instance.rs
vendored
Normal file
1168
third-party/vendor/wgpu-core/src/instance.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
460
third-party/vendor/wgpu-core/src/lib.rs
vendored
Normal file
460
third-party/vendor/wgpu-core/src/lib.rs
vendored
Normal file
|
|
@ -0,0 +1,460 @@
|
|||
/*! This library safely implements WebGPU on native platforms.
|
||||
* It is designed for integration into browsers, as well as wrapping
|
||||
* into other language-specific user-friendly libraries.
|
||||
*/
|
||||
|
||||
// When we have no backends, we end up with a lot of dead or otherwise unreachable code.
|
||||
#![cfg_attr(
|
||||
all(
|
||||
not(all(feature = "vulkan", not(target_arch = "wasm32"))),
|
||||
not(all(feature = "metal", any(target_os = "macos", target_os = "ios"))),
|
||||
not(all(feature = "dx12", windows)),
|
||||
not(all(feature = "dx11", windows)),
|
||||
not(feature = "gles"),
|
||||
),
|
||||
allow(unused, clippy::let_and_return)
|
||||
)]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
#![allow(
|
||||
// It is much clearer to assert negative conditions with eq! false
|
||||
clippy::bool_assert_comparison,
|
||||
// We use loops for getting early-out of scope without closures.
|
||||
clippy::never_loop,
|
||||
// We don't use syntax sugar where it's not necessary.
|
||||
clippy::match_like_matches_macro,
|
||||
// Redundant matching is more explicit.
|
||||
clippy::redundant_pattern_matching,
|
||||
// Explicit lifetimes are often easier to reason about.
|
||||
clippy::needless_lifetimes,
|
||||
// No need for defaults in the internal types.
|
||||
clippy::new_without_default,
|
||||
// Needless updates are more scaleable, easier to play with features.
|
||||
clippy::needless_update,
|
||||
// Need many arguments for some core functions to be able to re-use code in many situations.
|
||||
clippy::too_many_arguments,
|
||||
// For some reason `rustc` can warn about these in const generics even
|
||||
// though they are required.
|
||||
unused_braces,
|
||||
// Clashes with clippy::pattern_type_mismatch
|
||||
clippy::needless_borrowed_reference,
|
||||
)]
|
||||
#![warn(
|
||||
trivial_casts,
|
||||
trivial_numeric_casts,
|
||||
unsafe_op_in_unsafe_fn,
|
||||
unused_extern_crates,
|
||||
unused_qualifications,
|
||||
// We don't match on a reference, unless required.
|
||||
clippy::pattern_type_mismatch,
|
||||
)]
|
||||
|
||||
pub mod binding_model;
|
||||
pub mod command;
|
||||
mod conv;
|
||||
pub mod device;
|
||||
pub mod error;
|
||||
pub mod global;
|
||||
pub mod hal_api;
|
||||
pub mod hub;
|
||||
pub mod id;
|
||||
pub mod identity;
|
||||
mod init_tracker;
|
||||
pub mod instance;
|
||||
pub mod pipeline;
|
||||
pub mod present;
|
||||
pub mod registry;
|
||||
pub mod resource;
|
||||
pub mod storage;
|
||||
mod track;
|
||||
mod validation;
|
||||
|
||||
pub use hal::{api, MAX_BIND_GROUPS, MAX_COLOR_ATTACHMENTS, MAX_VERTEX_BUFFERS};
|
||||
|
||||
use atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use std::{borrow::Cow, os::raw::c_char, ptr, sync::atomic};
|
||||
|
||||
/// The index of a queue submission.
|
||||
///
|
||||
/// These are the values stored in `Device::fence`.
|
||||
type SubmissionIndex = hal::FenceValue;
|
||||
|
||||
type Index = u32;
|
||||
type Epoch = u32;
|
||||
|
||||
pub type RawString = *const c_char;
|
||||
pub type Label<'a> = Option<Cow<'a, str>>;
|
||||
|
||||
trait LabelHelpers<'a> {
|
||||
fn borrow_option(&'a self) -> Option<&'a str>;
|
||||
fn borrow_or_default(&'a self) -> &'a str;
|
||||
}
|
||||
impl<'a> LabelHelpers<'a> for Label<'a> {
|
||||
fn borrow_option(&'a self) -> Option<&'a str> {
|
||||
self.as_ref().map(|cow| cow.as_ref())
|
||||
}
|
||||
fn borrow_or_default(&'a self) -> &'a str {
|
||||
self.borrow_option().unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Reference count object that is 1:1 with each reference.
|
||||
///
|
||||
/// All the clones of a given `RefCount` point to the same
|
||||
/// heap-allocated atomic reference count. When the count drops to
|
||||
/// zero, only the count is freed. No other automatic cleanup takes
|
||||
/// place; this is just a reference count, not a smart pointer.
|
||||
///
|
||||
/// `RefCount` values are created only by [`LifeGuard::new`] and by
|
||||
/// `Clone`, so every `RefCount` is implicitly tied to some
|
||||
/// [`LifeGuard`].
|
||||
#[derive(Debug)]
|
||||
struct RefCount(ptr::NonNull<AtomicUsize>);
|
||||
|
||||
unsafe impl Send for RefCount {}
|
||||
unsafe impl Sync for RefCount {}
|
||||
|
||||
impl RefCount {
|
||||
const MAX: usize = 1 << 24;
|
||||
|
||||
/// Construct a new `RefCount`, with an initial count of 1.
|
||||
fn new() -> RefCount {
|
||||
let bx = Box::new(AtomicUsize::new(1));
|
||||
Self(unsafe { ptr::NonNull::new_unchecked(Box::into_raw(bx)) })
|
||||
}
|
||||
|
||||
fn load(&self) -> usize {
|
||||
unsafe { self.0.as_ref() }.load(Ordering::Acquire)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for RefCount {
|
||||
fn clone(&self) -> Self {
|
||||
let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::AcqRel);
|
||||
assert!(old_size < Self::MAX);
|
||||
Self(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for RefCount {
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
if self.0.as_ref().fetch_sub(1, Ordering::AcqRel) == 1 {
|
||||
drop(Box::from_raw(self.0.as_ptr()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reference count object that tracks multiple references.
|
||||
/// Unlike `RefCount`, it's manually inc()/dec() called.
|
||||
#[derive(Debug)]
|
||||
struct MultiRefCount(AtomicUsize);
|
||||
|
||||
impl MultiRefCount {
|
||||
fn new() -> Self {
|
||||
Self(AtomicUsize::new(1))
|
||||
}
|
||||
|
||||
fn inc(&self) {
|
||||
self.0.fetch_add(1, Ordering::AcqRel);
|
||||
}
|
||||
|
||||
fn dec_and_check_empty(&self) -> bool {
|
||||
self.0.fetch_sub(1, Ordering::AcqRel) == 1
|
||||
}
|
||||
}
|
||||
|
||||
/// Information needed to decide when it's safe to free some wgpu-core
|
||||
/// resource.
|
||||
///
|
||||
/// Each type representing a `wgpu-core` resource, like [`Device`],
|
||||
/// [`Buffer`], etc., contains a `LifeGuard` which indicates whether
|
||||
/// it is safe to free.
|
||||
///
|
||||
/// A resource may need to be retained for any of several reasons:
|
||||
///
|
||||
/// - The user may hold a reference to it (via a `wgpu::Buffer`, say).
|
||||
///
|
||||
/// - Other resources may depend on it (a texture view's backing
|
||||
/// texture, for example).
|
||||
///
|
||||
/// - It may be used by commands sent to the GPU that have not yet
|
||||
/// finished execution.
|
||||
///
|
||||
/// [`Device`]: device::Device
|
||||
/// [`Buffer`]: resource::Buffer
|
||||
#[derive(Debug)]
|
||||
pub struct LifeGuard {
|
||||
/// `RefCount` for the user's reference to this resource.
|
||||
///
|
||||
/// When the user first creates a `wgpu-core` resource, this `RefCount` is
|
||||
/// created along with the resource's `LifeGuard`. When the user drops the
|
||||
/// resource, we swap this out for `None`. Note that the resource may
|
||||
/// still be held alive by other resources.
|
||||
///
|
||||
/// Any `Stored<T>` value holds a clone of this `RefCount` along with the id
|
||||
/// of a `T` resource.
|
||||
ref_count: Option<RefCount>,
|
||||
|
||||
/// The index of the last queue submission in which the resource
|
||||
/// was used.
|
||||
///
|
||||
/// Each queue submission is fenced and assigned an index number
|
||||
/// sequentially. Thus, when a queue submission completes, we know any
|
||||
/// resources used in that submission and any lower-numbered submissions are
|
||||
/// no longer in use by the GPU.
|
||||
submission_index: AtomicUsize,
|
||||
|
||||
/// The `label` from the descriptor used to create the resource.
|
||||
#[cfg(debug_assertions)]
|
||||
pub(crate) label: String,
|
||||
}
|
||||
|
||||
impl LifeGuard {
|
||||
#[allow(unused_variables)]
|
||||
fn new(label: &str) -> Self {
|
||||
Self {
|
||||
ref_count: Some(RefCount::new()),
|
||||
submission_index: AtomicUsize::new(0),
|
||||
#[cfg(debug_assertions)]
|
||||
label: label.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn add_ref(&self) -> RefCount {
|
||||
self.ref_count.clone().unwrap()
|
||||
}
|
||||
|
||||
/// Record that this resource will be used by the queue submission with the
|
||||
/// given index.
|
||||
///
|
||||
/// Returns `true` if the resource is still held by the user.
|
||||
fn use_at(&self, submit_index: SubmissionIndex) -> bool {
|
||||
self.submission_index
|
||||
.store(submit_index as _, Ordering::Release);
|
||||
self.ref_count.is_some()
|
||||
}
|
||||
|
||||
fn life_count(&self) -> SubmissionIndex {
|
||||
self.submission_index.load(Ordering::Acquire) as _
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct Stored<T> {
|
||||
value: id::Valid<T>,
|
||||
ref_count: RefCount,
|
||||
}
|
||||
|
||||
const DOWNLEVEL_WARNING_MESSAGE: &str = "The underlying API or device in use does not \
|
||||
support enough features to be a fully compliant implementation of WebGPU. A subset of the features can still be used. \
|
||||
If you are running this program on native and not in a browser and wish to limit the features you use to the supported subset, \
|
||||
call Adapter::downlevel_properties or Device::downlevel_properties to get a listing of the features the current \
|
||||
platform supports.";
|
||||
const DOWNLEVEL_ERROR_MESSAGE: &str = "This is not an invalid use of WebGPU: the underlying API or device does not \
|
||||
support enough features to be a fully compliant implementation. A subset of the features can still be used. \
|
||||
If you are running this program on native and not in a browser and wish to work around this issue, call \
|
||||
Adapter::downlevel_properties or Device::downlevel_properties to get a listing of the features the current \
|
||||
platform supports.";
|
||||
|
||||
// #[cfg] attributes in exported macros are interesting!
|
||||
//
|
||||
// The #[cfg] conditions in a macro's expansion are evaluated using the
|
||||
// configuration options (features, target architecture and os, etc.) in force
|
||||
// where the macro is *used*, not where it is *defined*. That is, if crate A
|
||||
// defines a macro like this:
|
||||
//
|
||||
// #[macro_export]
|
||||
// macro_rules! if_bleep {
|
||||
// { } => {
|
||||
// #[cfg(feature = "bleep")]
|
||||
// bleep();
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// and then crate B uses it like this:
|
||||
//
|
||||
// fn f() {
|
||||
// if_bleep! { }
|
||||
// }
|
||||
//
|
||||
// then it is crate B's `"bleep"` feature, not crate A's, that determines
|
||||
// whether the macro expands to a function call or an empty statement. The
|
||||
// entire configuration predicate is evaluated in the use's context, not the
|
||||
// definition's.
|
||||
//
|
||||
// Since `wgpu-core` selects back ends using features, we need to make sure the
|
||||
// arms of the `gfx_select!` macro are pruned according to `wgpu-core`'s
|
||||
// features, not those of whatever crate happens to be using `gfx_select!`. This
|
||||
// means we can't use `#[cfg]` attributes in `gfx_select!`s definition itself.
|
||||
// Instead, for each backend, `gfx_select!` must use a macro whose definition is
|
||||
// selected by `#[cfg]` in `wgpu-core`. The configuration predicate is still
|
||||
// evaluated when the macro is used; we've just moved the `#[cfg]` into a macro
|
||||
// used by `wgpu-core` itself.
|
||||
|
||||
/// Define an exported macro named `$public` that expands to an expression if
|
||||
/// the feature `$feature` is enabled, or to a panic otherwise.
|
||||
///
|
||||
/// This is used in the definition of `gfx_select!`, to dispatch the
|
||||
/// call to the appropriate backend, but panic if that backend was not
|
||||
/// compiled in.
|
||||
///
|
||||
/// For a call like this:
|
||||
///
|
||||
/// ```ignore
|
||||
/// define_backend_caller! { name, private, "feature" if cfg_condition }
|
||||
/// ```
|
||||
///
|
||||
/// define a macro `name`, used like this:
|
||||
///
|
||||
/// ```ignore
|
||||
/// name!(expr)
|
||||
/// ```
|
||||
///
|
||||
/// that expands to `expr` if `#[cfg(cfg_condition)]` is enabled, or a
|
||||
/// panic otherwise. The panic message complains that `"feature"` is
|
||||
/// not enabled.
|
||||
///
|
||||
/// Because of odd technical limitations on exporting macros expanded
|
||||
/// by other macros, you must supply both a public-facing name for the
|
||||
/// macro and a private name, `$private`, which is never used
|
||||
/// outside this macro. For details:
|
||||
/// <https://github.com/rust-lang/rust/pull/52234#issuecomment-976702997>
|
||||
macro_rules! define_backend_caller {
|
||||
{ $public:ident, $private:ident, $feature:literal if $cfg:meta } => {
|
||||
#[cfg($cfg)]
|
||||
#[macro_export]
|
||||
macro_rules! $private {
|
||||
( $call:expr ) => ( $call )
|
||||
}
|
||||
|
||||
#[cfg(not($cfg))]
|
||||
#[macro_export]
|
||||
macro_rules! $private {
|
||||
( $call:expr ) => (
|
||||
panic!("Identifier refers to disabled backend feature {:?}", $feature)
|
||||
)
|
||||
}
|
||||
|
||||
// See note about rust-lang#52234 above.
|
||||
#[doc(hidden)] pub use $private as $public;
|
||||
}
|
||||
}
|
||||
|
||||
// Define a macro for each `gfx_select!` match arm. For example,
|
||||
//
|
||||
// gfx_if_vulkan!(expr)
|
||||
//
|
||||
// expands to `expr` if the `"vulkan"` feature is enabled, or to a panic
|
||||
// otherwise.
|
||||
define_backend_caller! { gfx_if_vulkan, gfx_if_vulkan_hidden, "vulkan" if all(feature = "vulkan", not(target_arch = "wasm32")) }
|
||||
define_backend_caller! { gfx_if_metal, gfx_if_metal_hidden, "metal" if all(feature = "metal", any(target_os = "macos", target_os = "ios")) }
|
||||
define_backend_caller! { gfx_if_dx12, gfx_if_dx12_hidden, "dx12" if all(feature = "dx12", windows) }
|
||||
define_backend_caller! { gfx_if_dx11, gfx_if_dx11_hidden, "dx11" if all(feature = "dx11", windows) }
|
||||
define_backend_caller! { gfx_if_gles, gfx_if_gles_hidden, "gles" if feature = "gles" }
|
||||
|
||||
/// Dispatch on an [`Id`]'s backend to a backend-generic method.
|
||||
///
|
||||
/// Uses of this macro have the form:
|
||||
///
|
||||
/// ```ignore
|
||||
///
|
||||
/// gfx_select!(id => value.method(args...))
|
||||
///
|
||||
/// ```
|
||||
///
|
||||
/// This expands to an expression that calls `value.method::<A>(args...)` for
|
||||
/// the backend `A` selected by `id`. The expansion matches on `id.backend()`,
|
||||
/// with an arm for each backend type in [`wgpu_types::Backend`] which calls the
|
||||
/// specialization of `method` for the given backend. This allows resource
|
||||
/// identifiers to select backends dynamically, even though many `wgpu_core`
|
||||
/// methods are compiled and optimized for a specific back end.
|
||||
///
|
||||
/// This macro is typically used to call methods on [`wgpu_core::global::Global`],
|
||||
/// many of which take a single `hal::Api` type parameter. For example, to
|
||||
/// create a new buffer on the device indicated by `device_id`, one would say:
|
||||
///
|
||||
/// ```ignore
|
||||
/// gfx_select!(device_id => global.device_create_buffer(device_id, ...))
|
||||
/// ```
|
||||
///
|
||||
/// where the `device_create_buffer` method is defined like this:
|
||||
///
|
||||
/// ```ignore
|
||||
/// impl<...> Global<...> {
|
||||
/// pub fn device_create_buffer<A: hal::Api>(&self, ...) -> ...
|
||||
/// { ... }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// That `gfx_select!` call uses `device_id`'s backend to select the right
|
||||
/// backend type `A` for a call to `Global::device_create_buffer<A>`.
|
||||
///
|
||||
/// However, there's nothing about this macro that is specific to `global::Global`.
|
||||
/// For example, Firefox's embedding of `wgpu_core` defines its own types with
|
||||
/// methods that take `hal::Api` type parameters. Firefox uses `gfx_select!` to
|
||||
/// dynamically dispatch to the right specialization based on the resource's id.
|
||||
///
|
||||
/// [`wgpu_types::Backend`]: wgt::Backend
|
||||
/// [`wgpu_core::global::Global`]: crate::global::Global
|
||||
/// [`Id`]: id::Id
|
||||
#[macro_export]
|
||||
macro_rules! gfx_select {
|
||||
($id:expr => $global:ident.$method:ident( $($param:expr),* )) => {
|
||||
match $id.backend() {
|
||||
wgt::Backend::Vulkan => $crate::gfx_if_vulkan!($global.$method::<$crate::api::Vulkan>( $($param),* )),
|
||||
wgt::Backend::Metal => $crate::gfx_if_metal!($global.$method::<$crate::api::Metal>( $($param),* )),
|
||||
wgt::Backend::Dx12 => $crate::gfx_if_dx12!($global.$method::<$crate::api::Dx12>( $($param),* )),
|
||||
wgt::Backend::Dx11 => $crate::gfx_if_dx11!($global.$method::<$crate::api::Dx11>( $($param),* )),
|
||||
wgt::Backend::Gl => $crate::gfx_if_gles!($global.$method::<$crate::api::Gles>( $($param),+ )),
|
||||
other => panic!("Unexpected backend {:?}", other),
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Fast hash map used internally.
|
||||
type FastHashMap<K, V> =
|
||||
std::collections::HashMap<K, V, std::hash::BuildHasherDefault<rustc_hash::FxHasher>>;
|
||||
/// Fast hash set used internally.
|
||||
type FastHashSet<K> =
|
||||
std::collections::HashSet<K, std::hash::BuildHasherDefault<rustc_hash::FxHasher>>;
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn get_lowest_common_denom(a: u32, b: u32) -> u32 {
|
||||
let gcd = if a >= b {
|
||||
get_greatest_common_divisor(a, b)
|
||||
} else {
|
||||
get_greatest_common_divisor(b, a)
|
||||
};
|
||||
a * b / gcd
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn get_greatest_common_divisor(mut a: u32, mut b: u32) -> u32 {
|
||||
assert!(a >= b);
|
||||
loop {
|
||||
let c = a % b;
|
||||
if c == 0 {
|
||||
return b;
|
||||
} else {
|
||||
a = b;
|
||||
b = c;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lcd() {
|
||||
assert_eq!(get_lowest_common_denom(2, 2), 2);
|
||||
assert_eq!(get_lowest_common_denom(2, 3), 6);
|
||||
assert_eq!(get_lowest_common_denom(6, 4), 12);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_gcd() {
|
||||
assert_eq!(get_greatest_common_divisor(5, 1), 1);
|
||||
assert_eq!(get_greatest_common_divisor(4, 2), 2);
|
||||
assert_eq!(get_greatest_common_divisor(6, 4), 2);
|
||||
assert_eq!(get_greatest_common_divisor(7, 7), 7);
|
||||
}
|
||||
438
third-party/vendor/wgpu-core/src/pipeline.rs
vendored
Normal file
438
third-party/vendor/wgpu-core/src/pipeline.rs
vendored
Normal file
|
|
@ -0,0 +1,438 @@
|
|||
use crate::{
|
||||
binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError},
|
||||
command::ColorAttachmentError,
|
||||
device::{DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext},
|
||||
id::{DeviceId, PipelineLayoutId, ShaderModuleId},
|
||||
resource::Resource,
|
||||
validation, Label, LifeGuard, Stored,
|
||||
};
|
||||
use arrayvec::ArrayVec;
|
||||
use std::{borrow::Cow, error::Error, fmt, marker::PhantomData, num::NonZeroU32};
|
||||
use thiserror::Error;
|
||||
|
||||
/// Information about buffer bindings, which
|
||||
/// is validated against the shader (and pipeline)
|
||||
/// at draw time as opposed to initialization time.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct LateSizedBufferGroup {
|
||||
// The order has to match `BindGroup::late_buffer_binding_sizes`.
|
||||
pub(crate) shader_sizes: Vec<wgt::BufferAddress>,
|
||||
}
|
||||
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum ShaderModuleSource<'a> {
|
||||
#[cfg(feature = "wgsl")]
|
||||
Wgsl(Cow<'a, str>),
|
||||
Naga(Cow<'static, naga::Module>),
|
||||
/// Dummy variant because `Naga` doesn't have a lifetime and without enough active features it
|
||||
/// could be the last one active.
|
||||
#[doc(hidden)]
|
||||
Dummy(PhantomData<&'a ()>),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
|
||||
pub struct ShaderModuleDescriptor<'a> {
|
||||
pub label: Label<'a>,
|
||||
#[cfg_attr(feature = "serde", serde(default))]
|
||||
pub shader_bound_checks: wgt::ShaderBoundChecks,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ShaderModule<A: hal::Api> {
|
||||
pub(crate) raw: A::ShaderModule,
|
||||
pub(crate) device_id: Stored<DeviceId>,
|
||||
pub(crate) interface: Option<validation::Interface>,
|
||||
#[cfg(debug_assertions)]
|
||||
pub(crate) label: String,
|
||||
}
|
||||
|
||||
impl<A: hal::Api> Resource for ShaderModule<A> {
|
||||
const TYPE: &'static str = "ShaderModule";
|
||||
|
||||
fn life_guard(&self) -> &LifeGuard {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
fn label(&self) -> &str {
|
||||
#[cfg(debug_assertions)]
|
||||
return &self.label;
|
||||
#[cfg(not(debug_assertions))]
|
||||
return "";
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ShaderError<E> {
|
||||
pub source: String,
|
||||
pub label: Option<String>,
|
||||
pub inner: Box<E>,
|
||||
}
|
||||
#[cfg(feature = "wgsl")]
|
||||
impl fmt::Display for ShaderError<naga::front::wgsl::ParseError> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let label = self.label.as_deref().unwrap_or_default();
|
||||
let string = self.inner.emit_to_string(&self.source);
|
||||
write!(f, "\nShader '{label}' parsing {string}")
|
||||
}
|
||||
}
|
||||
impl fmt::Display for ShaderError<naga::WithSpan<naga::valid::ValidationError>> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
use codespan_reporting::{
|
||||
diagnostic::{Diagnostic, Label},
|
||||
files::SimpleFile,
|
||||
term,
|
||||
};
|
||||
|
||||
let label = self.label.as_deref().unwrap_or_default();
|
||||
let files = SimpleFile::new(label, &self.source);
|
||||
let config = term::Config::default();
|
||||
let mut writer = term::termcolor::NoColor::new(Vec::new());
|
||||
|
||||
let diagnostic = Diagnostic::error().with_labels(
|
||||
self.inner
|
||||
.spans()
|
||||
.map(|&(span, ref desc)| {
|
||||
Label::primary((), span.to_range().unwrap()).with_message(desc.to_owned())
|
||||
})
|
||||
.collect(),
|
||||
);
|
||||
|
||||
term::emit(&mut writer, &config, &files, &diagnostic).expect("cannot write error");
|
||||
|
||||
write!(
|
||||
f,
|
||||
"\nShader validation {}",
|
||||
String::from_utf8_lossy(&writer.into_inner())
|
||||
)
|
||||
}
|
||||
}
|
||||
impl<E> Error for ShaderError<E>
|
||||
where
|
||||
ShaderError<E>: fmt::Display,
|
||||
E: Error + 'static,
|
||||
{
|
||||
fn source(&self) -> Option<&(dyn Error + 'static)> {
|
||||
Some(&self.inner)
|
||||
}
|
||||
}
|
||||
|
||||
//Note: `Clone` would require `WithSpan: Clone`.
|
||||
#[derive(Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum CreateShaderModuleError {
|
||||
#[cfg(feature = "wgsl")]
|
||||
#[error(transparent)]
|
||||
Parsing(#[from] ShaderError<naga::front::wgsl::ParseError>),
|
||||
#[error("Failed to generate the backend-specific code")]
|
||||
Generation,
|
||||
#[error(transparent)]
|
||||
Device(#[from] DeviceError),
|
||||
#[error(transparent)]
|
||||
Validation(#[from] ShaderError<naga::WithSpan<naga::valid::ValidationError>>),
|
||||
#[error(transparent)]
|
||||
MissingFeatures(#[from] MissingFeatures),
|
||||
#[error(
|
||||
"Shader global {bind:?} uses a group index {group} that exceeds the max_bind_groups limit of {limit}."
|
||||
)]
|
||||
InvalidGroupIndex {
|
||||
bind: naga::ResourceBinding,
|
||||
group: u32,
|
||||
limit: u32,
|
||||
},
|
||||
}
|
||||
|
||||
impl CreateShaderModuleError {
|
||||
pub fn location(&self, source: &str) -> Option<naga::SourceLocation> {
|
||||
match *self {
|
||||
#[cfg(feature = "wgsl")]
|
||||
CreateShaderModuleError::Parsing(ref err) => err.inner.location(source),
|
||||
CreateShaderModuleError::Validation(ref err) => err.inner.location(source),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Describes a programmable pipeline stage.
|
||||
#[derive(Clone, Debug)]
|
||||
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
|
||||
pub struct ProgrammableStageDescriptor<'a> {
|
||||
/// The compiled shader module for this stage.
|
||||
pub module: ShaderModuleId,
|
||||
/// The name of the entry point in the compiled shader. There must be a function with this name
|
||||
/// in the shader.
|
||||
pub entry_point: Cow<'a, str>,
|
||||
}
|
||||
|
||||
/// Number of implicit bind groups derived at pipeline creation.
|
||||
pub type ImplicitBindGroupCount = u8;
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum ImplicitLayoutError {
|
||||
#[error("Missing IDs for deriving {0} bind groups")]
|
||||
MissingIds(ImplicitBindGroupCount),
|
||||
#[error("Unable to reflect the shader {0:?} interface")]
|
||||
ReflectionError(wgt::ShaderStages),
|
||||
#[error(transparent)]
|
||||
BindGroup(#[from] CreateBindGroupLayoutError),
|
||||
#[error(transparent)]
|
||||
Pipeline(#[from] CreatePipelineLayoutError),
|
||||
}
|
||||
|
||||
/// Describes a compute pipeline.
|
||||
#[derive(Clone, Debug)]
|
||||
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
|
||||
pub struct ComputePipelineDescriptor<'a> {
|
||||
pub label: Label<'a>,
|
||||
/// The layout of bind groups for this pipeline.
|
||||
pub layout: Option<PipelineLayoutId>,
|
||||
/// The compiled compute stage and its entry point.
|
||||
pub stage: ProgrammableStageDescriptor<'a>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum CreateComputePipelineError {
|
||||
#[error(transparent)]
|
||||
Device(#[from] DeviceError),
|
||||
#[error("Pipeline layout is invalid")]
|
||||
InvalidLayout,
|
||||
#[error("Unable to derive an implicit layout")]
|
||||
Implicit(#[from] ImplicitLayoutError),
|
||||
#[error("Error matching shader requirements against the pipeline")]
|
||||
Stage(#[from] validation::StageError),
|
||||
#[error("Internal error: {0}")]
|
||||
Internal(String),
|
||||
#[error(transparent)]
|
||||
MissingDownlevelFlags(#[from] MissingDownlevelFlags),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ComputePipeline<A: hal::Api> {
|
||||
pub(crate) raw: A::ComputePipeline,
|
||||
pub(crate) layout_id: Stored<PipelineLayoutId>,
|
||||
pub(crate) device_id: Stored<DeviceId>,
|
||||
pub(crate) late_sized_buffer_groups: ArrayVec<LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }>,
|
||||
pub(crate) life_guard: LifeGuard,
|
||||
}
|
||||
|
||||
impl<A: hal::Api> Resource for ComputePipeline<A> {
|
||||
const TYPE: &'static str = "ComputePipeline";
|
||||
|
||||
fn life_guard(&self) -> &LifeGuard {
|
||||
&self.life_guard
|
||||
}
|
||||
}
|
||||
|
||||
/// Describes how the vertex buffer is interpreted.
|
||||
#[derive(Clone, Debug)]
|
||||
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
|
||||
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
|
||||
pub struct VertexBufferLayout<'a> {
|
||||
/// The stride, in bytes, between elements of this buffer.
|
||||
pub array_stride: wgt::BufferAddress,
|
||||
/// How often this vertex buffer is "stepped" forward.
|
||||
pub step_mode: wgt::VertexStepMode,
|
||||
/// The list of attributes which comprise a single vertex.
|
||||
pub attributes: Cow<'a, [wgt::VertexAttribute]>,
|
||||
}
|
||||
|
||||
/// Describes the vertex process in a render pipeline.
|
||||
#[derive(Clone, Debug)]
|
||||
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
|
||||
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
|
||||
pub struct VertexState<'a> {
|
||||
/// The compiled vertex stage and its entry point.
|
||||
pub stage: ProgrammableStageDescriptor<'a>,
|
||||
/// The format of any vertex buffers used with this pipeline.
|
||||
pub buffers: Cow<'a, [VertexBufferLayout<'a>]>,
|
||||
}
|
||||
|
||||
/// Describes fragment processing in a render pipeline.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct FragmentState<'a> {
    /// The compiled fragment stage and its entry point.
    pub stage: ProgrammableStageDescriptor<'a>,
    /// The effect of draw calls on the color aspect of the output target.
    /// `None` entries are color targets that are left unbound.
    pub targets: Cow<'a, [Option<wgt::ColorTargetState>]>,
}
|
||||
|
||||
/// Describes a render (graphics) pipeline.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct RenderPipelineDescriptor<'a> {
    /// Debug label for the pipeline.
    pub label: Label<'a>,
    /// The layout of bind groups for this pipeline.
    /// `None` requests an implicitly derived layout.
    pub layout: Option<PipelineLayoutId>,
    /// The vertex processing state for this pipeline.
    pub vertex: VertexState<'a>,
    /// The properties of the pipeline at the primitive assembly and rasterization level.
    #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
    pub primitive: wgt::PrimitiveState,
    /// The effect of draw calls on the depth and stencil aspects of the output target, if any.
    #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
    pub depth_stencil: Option<wgt::DepthStencilState>,
    /// The multi-sampling properties of the pipeline.
    #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
    pub multisample: wgt::MultisampleState,
    /// The fragment processing state for this pipeline.
    pub fragment: Option<FragmentState<'a>>,
    /// If the pipeline will be used with a multiview render pass, this indicates how many array
    /// layers the attachments will have.
    pub multiview: Option<NonZeroU32>,
}
|
||||
|
||||
/// Validation failure for a single color target state of a render pipeline.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum ColorStateError {
    #[error("Format {0:?} is not renderable")]
    FormatNotRenderable(wgt::TextureFormat),
    #[error("Format {0:?} is not blendable")]
    FormatNotBlendable(wgt::TextureFormat),
    #[error("Format {0:?} does not have a color aspect")]
    FormatNotColor(wgt::TextureFormat),
    #[error("Format {0:?} can't be multisampled")]
    FormatNotMultisampled(wgt::TextureFormat),
    #[error("Output format {pipeline} is incompatible with the shader {shader}")]
    IncompatibleFormat {
        pipeline: validation::NumericType,
        shader: validation::NumericType,
    },
    #[error("Blend factors for {0:?} must be `One`")]
    InvalidMinMaxBlendFactors(wgt::BlendComponent),
    #[error("Invalid write mask {0:?}")]
    InvalidWriteMask(wgt::ColorWrites),
}
|
||||
|
||||
/// Validation failure for the depth/stencil state of a render pipeline.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum DepthStencilStateError {
    #[error("Format {0:?} is not renderable")]
    FormatNotRenderable(wgt::TextureFormat),
    #[error("Format {0:?} does not have a depth aspect, but depth test/write is enabled")]
    FormatNotDepth(wgt::TextureFormat),
    #[error("Format {0:?} does not have a stencil aspect, but stencil test/write is enabled")]
    FormatNotStencil(wgt::TextureFormat),
    #[error("Format {0:?} can't be multisampled")]
    FormatNotMultisampled(wgt::TextureFormat),
}
|
||||
|
||||
/// Error returned when render-pipeline creation fails; each variant carries
/// the specific validation or device failure. The `#[error]` attributes
/// (via `thiserror`) define the user-facing messages.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateRenderPipelineError {
    #[error(transparent)]
    ColorAttachment(#[from] ColorAttachmentError),
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Pipeline layout is invalid")]
    InvalidLayout,
    #[error("Unable to derive an implicit layout")]
    Implicit(#[from] ImplicitLayoutError),
    /// The `u8` is the index of the offending color target.
    #[error("Color state [{0}] is invalid")]
    ColorState(u8, #[source] ColorStateError),
    #[error("Depth/stencil state is invalid")]
    DepthStencilState(#[from] DepthStencilStateError),
    #[error("Invalid sample count {0}")]
    InvalidSampleCount(u32),
    #[error("The number of vertex buffers {given} exceeds the limit {limit}")]
    TooManyVertexBuffers { given: u32, limit: u32 },
    #[error("The total number of vertex attributes {given} exceeds the limit {limit}")]
    TooManyVertexAttributes { given: u32, limit: u32 },
    #[error("Vertex buffer {index} stride {given} exceeds the limit {limit}")]
    VertexStrideTooLarge { index: u32, given: u32, limit: u32 },
    #[error("Vertex buffer {index} stride {stride} does not respect `VERTEX_STRIDE_ALIGNMENT`")]
    UnalignedVertexStride {
        index: u32,
        stride: wgt::BufferAddress,
    },
    #[error("Vertex attribute at location {location} has invalid offset {offset}")]
    InvalidVertexAttributeOffset {
        location: wgt::ShaderLocation,
        offset: wgt::BufferAddress,
    },
    #[error("Two or more vertex attributes were assigned to the same location in the shader: {0}")]
    ShaderLocationClash(u32),
    #[error("Strip index format was not set to None but to {strip_index_format:?} while using the non-strip topology {topology:?}")]
    StripIndexFormatForNonStripTopology {
        strip_index_format: Option<wgt::IndexFormat>,
        topology: wgt::PrimitiveTopology,
    },
    #[error("Conservative Rasterization is only supported for wgt::PolygonMode::Fill")]
    ConservativeRasterizationNonFillPolygonMode,
    #[error(transparent)]
    MissingFeatures(#[from] MissingFeatures),
    #[error(transparent)]
    MissingDownlevelFlags(#[from] MissingDownlevelFlags),
    /// Shader interface validation failed for the given stage.
    #[error("Error matching {stage:?} shader requirements against the pipeline")]
    Stage {
        stage: wgt::ShaderStages,
        #[source]
        error: validation::StageError,
    },
    /// A backend/driver-level failure while compiling the given stage.
    #[error("Internal error in {stage:?} shader: {error}")]
    Internal {
        stage: wgt::ShaderStages,
        error: String,
    },
    #[error("In the provided shader, the type given for group {group} binding {binding} has a size of {size}. As the device does not support `DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED`, the type must have a size that is a multiple of 16 bytes.")]
    UnalignedShader { group: u32, binding: u32, size: u64 },
}
|
||||
|
||||
bitflags::bitflags! {
    /// Flags recording which dynamic render-pass state a pipeline uses, so
    /// passes can validate that the corresponding `set_*` calls were made.
    #[repr(transparent)]
    #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
    pub struct PipelineFlags: u32 {
        /// Pipeline reads the blend-constant color.
        const BLEND_CONSTANT = 1 << 0;
        /// Pipeline reads the stencil reference value.
        const STENCIL_REFERENCE = 1 << 1;
        /// Pipeline writes to the depth aspect.
        const WRITES_DEPTH = 1 << 2;
        /// Pipeline writes to the stencil aspect.
        const WRITES_STENCIL = 1 << 3;
    }
}
|
||||
|
||||
/// How a render pipeline will retrieve attributes from a particular vertex buffer.
#[derive(Clone, Copy, Debug)]
pub struct VertexStep {
    /// The byte stride in the buffer between one attribute value and the next.
    pub stride: wgt::BufferAddress,

    /// Whether the buffer is indexed by vertex number or instance number.
    pub mode: wgt::VertexStepMode,
}
|
||||
|
||||
impl Default for VertexStep {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
stride: 0,
|
||||
mode: wgt::VertexStepMode::Vertex,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A validated render pipeline, wrapping the backend's raw pipeline object
/// plus the state render passes need to validate draws against it.
#[derive(Debug)]
pub struct RenderPipeline<A: hal::Api> {
    // The backend-specific pipeline object.
    pub(crate) raw: A::RenderPipeline,
    // The pipeline layout this pipeline was created with.
    pub(crate) layout_id: Stored<PipelineLayoutId>,
    // The device that owns this pipeline.
    pub(crate) device_id: Stored<DeviceId>,
    // Attachment formats/sample counts this pipeline is compatible with.
    pub(crate) pass_context: RenderPassContext,
    // Which dynamic pass state (blend constant, stencil ref, ...) this pipeline uses.
    pub(crate) flags: PipelineFlags,
    // Index format required when used with a strip topology.
    pub(crate) strip_index_format: Option<wgt::IndexFormat>,
    // Per-vertex-buffer stride/step info, used to validate vertex buffer bindings.
    pub(crate) vertex_steps: Vec<VertexStep>,
    // Per-bind-group info about bindings whose size is only known at draw time.
    pub(crate) late_sized_buffer_groups: ArrayVec<LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }>,
    // Reference counting and label storage for this resource.
    pub(crate) life_guard: LifeGuard,
}
|
||||
|
||||
impl<A: hal::Api> Resource for RenderPipeline<A> {
    /// Type name used in labels and error messages.
    const TYPE: &'static str = "RenderPipeline";

    /// Expose the life guard so the hub can track references to this pipeline.
    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
|
||||
425
third-party/vendor/wgpu-core/src/present.rs
vendored
Normal file
425
third-party/vendor/wgpu-core/src/present.rs
vendored
Normal file
|
|
@ -0,0 +1,425 @@
|
|||
/*! Presentation.
|
||||
|
||||
## Lifecycle
|
||||
|
||||
Whenever a submission detects the use of any surface texture, it adds it to the device
|
||||
tracker for the duration of the submission (temporarily, while recording).
|
||||
It's added with `UNINITIALIZED` state and transitioned into `empty()` state.
|
||||
When this texture is presented, we remove it from the device tracker as well as
|
||||
extract it from the hub.
|
||||
!*/
|
||||
|
||||
use std::borrow::Borrow;
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
use crate::device::trace::Action;
|
||||
use crate::{
|
||||
conv,
|
||||
device::{DeviceError, MissingDownlevelFlags, WaitIdleError},
|
||||
global::Global,
|
||||
hal_api::HalApi,
|
||||
hub::Token,
|
||||
id::{DeviceId, SurfaceId, TextureId, Valid},
|
||||
identity::{GlobalIdentityHandlerFactory, Input},
|
||||
init_tracker::TextureInitTracker,
|
||||
resource, track, LifeGuard, Stored,
|
||||
};
|
||||
|
||||
use hal::{Queue as _, Surface as _};
|
||||
use thiserror::Error;
|
||||
use wgt::SurfaceStatus as Status;
|
||||
|
||||
/// How long `surface_get_current_texture` waits for a frame before reporting
/// `Status::Timeout`, in milliseconds.
const FRAME_TIMEOUT_MS: u32 = 1000;
/// Preferred swapchain depth requested when configuring a surface.
pub const DESIRED_NUM_FRAMES: u32 = 3;
|
||||
|
||||
/// Per-surface presentation state: the owning device, the active
/// configuration, and the currently acquired texture (if any).
#[derive(Debug)]
pub(crate) struct Presentation {
    // Device the surface was configured against.
    pub(crate) device_id: Stored<DeviceId>,
    // The configuration used (format, size, usage, view formats, ...).
    pub(crate) config: wgt::SurfaceConfiguration<Vec<wgt::TextureFormat>>,
    #[allow(unused)]
    pub(crate) num_frames: u32,
    // Set while a surface texture is acquired and not yet presented/discarded.
    pub(crate) acquired_texture: Option<Stored<TextureId>>,
}
|
||||
|
||||
impl Presentation {
    /// The backend this presentation's device belongs to, recovered from the
    /// device id (third element of the unzipped id tuple).
    pub(crate) fn backend(&self) -> wgt::Backend {
        crate::id::TypedId::unzip(self.device_id.value.0).2
    }
}
|
||||
|
||||
/// Errors that can occur while acquiring, presenting, or discarding a
/// surface texture.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum SurfaceError {
    #[error("Surface is invalid")]
    Invalid,
    #[error("Surface is not configured for presentation")]
    NotConfigured,
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Surface image is already acquired")]
    AlreadyAcquired,
    #[error("Acquired frame is still referenced")]
    StillReferenced,
}
|
||||
|
||||
/// Errors that can occur while configuring a surface for presentation.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum ConfigureSurfaceError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Invalid surface")]
    InvalidSurface,
    #[error("The view format {0:?} is not compatible with texture format {1:?}, only changing srgb-ness is allowed.")]
    InvalidViewFormat(wgt::TextureFormat, wgt::TextureFormat),
    #[error(transparent)]
    MissingDownlevelFlags(#[from] MissingDownlevelFlags),
    #[error("`SurfaceOutput` must be dropped before a new `Surface` is made")]
    PreviousOutputExists,
    #[error("Both `Surface` width and height must be non-zero. Wait to recreate the `Surface` until the window has non-zero area.")]
    ZeroArea,
    #[error("Surface does not support the adapter's queue family")]
    UnsupportedQueueFamily,
    #[error("Requested format {requested:?} is not in list of supported formats: {available:?}")]
    UnsupportedFormat {
        requested: wgt::TextureFormat,
        available: Vec<wgt::TextureFormat>,
    },
    #[error("Requested present mode {requested:?} is not in the list of supported present modes: {available:?}")]
    UnsupportedPresentMode {
        requested: wgt::PresentMode,
        available: Vec<wgt::PresentMode>,
    },
    #[error("Requested alpha mode {requested:?} is not in the list of supported alpha modes: {available:?}")]
    UnsupportedAlphaMode {
        requested: wgt::CompositeAlphaMode,
        available: Vec<wgt::CompositeAlphaMode>,
    },
    #[error("Requested usage is not supported")]
    UnsupportedUsage,
    #[error("Gpu got stuck :(")]
    StuckGpu,
}
|
||||
|
||||
impl From<WaitIdleError> for ConfigureSurfaceError {
|
||||
fn from(e: WaitIdleError) -> Self {
|
||||
match e {
|
||||
WaitIdleError::Device(d) => ConfigureSurfaceError::Device(d),
|
||||
WaitIdleError::WrongSubmissionIndex(..) => unreachable!(),
|
||||
WaitIdleError::StuckGpu => ConfigureSurfaceError::StuckGpu,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// FFI-friendly result of acquiring a surface texture: the acquisition
/// status plus the texture id, when one was actually acquired.
#[repr(C)]
#[derive(Debug)]
pub struct SurfaceOutput {
    pub status: Status,
    // `None` when acquisition timed out or failed non-fatally.
    pub texture_id: Option<TextureId>,
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
|
||||
pub fn surface_get_current_texture<A: HalApi>(
|
||||
&self,
|
||||
surface_id: SurfaceId,
|
||||
texture_id_in: Input<G, TextureId>,
|
||||
) -> Result<SurfaceOutput, SurfaceError> {
|
||||
profiling::scope!("SwapChain::get_next_texture");
|
||||
|
||||
let hub = A::hub(self);
|
||||
let mut token = Token::root();
|
||||
let fid = hub.textures.prepare(texture_id_in);
|
||||
|
||||
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
|
||||
let surface = surface_guard
|
||||
.get_mut(surface_id)
|
||||
.map_err(|_| SurfaceError::Invalid)?;
|
||||
let (device_guard, mut token) = hub.devices.read(&mut token);
|
||||
|
||||
let (device, config) = match surface.presentation {
|
||||
Some(ref present) => {
|
||||
let device = &device_guard[present.device_id.value];
|
||||
(device, present.config.clone())
|
||||
}
|
||||
None => return Err(SurfaceError::NotConfigured),
|
||||
};
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(ref trace) = device.trace {
|
||||
trace.lock().add(Action::GetSurfaceTexture {
|
||||
id: fid.id(),
|
||||
parent_id: surface_id,
|
||||
});
|
||||
}
|
||||
#[cfg(not(feature = "trace"))]
|
||||
let _ = device;
|
||||
|
||||
let suf = A::get_surface_mut(surface);
|
||||
let (texture_id, status) = match unsafe {
|
||||
suf.unwrap()
|
||||
.raw
|
||||
.acquire_texture(Some(std::time::Duration::from_millis(
|
||||
FRAME_TIMEOUT_MS as u64,
|
||||
)))
|
||||
} {
|
||||
Ok(Some(ast)) => {
|
||||
let clear_view_desc = hal::TextureViewDescriptor {
|
||||
label: Some("(wgpu internal) clear surface texture view"),
|
||||
format: config.format,
|
||||
dimension: wgt::TextureViewDimension::D2,
|
||||
usage: hal::TextureUses::COLOR_TARGET,
|
||||
range: wgt::ImageSubresourceRange::default(),
|
||||
};
|
||||
let mut clear_views = smallvec::SmallVec::new();
|
||||
clear_views.push(
|
||||
unsafe {
|
||||
hal::Device::create_texture_view(
|
||||
&device.raw,
|
||||
ast.texture.borrow(),
|
||||
&clear_view_desc,
|
||||
)
|
||||
}
|
||||
.map_err(DeviceError::from)?,
|
||||
);
|
||||
|
||||
let present = surface.presentation.as_mut().unwrap();
|
||||
let texture = resource::Texture {
|
||||
inner: resource::TextureInner::Surface {
|
||||
raw: ast.texture,
|
||||
parent_id: Valid(surface_id),
|
||||
has_work: false,
|
||||
},
|
||||
device_id: present.device_id.clone(),
|
||||
desc: wgt::TextureDescriptor {
|
||||
label: (),
|
||||
size: wgt::Extent3d {
|
||||
width: config.width,
|
||||
height: config.height,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
sample_count: 1,
|
||||
mip_level_count: 1,
|
||||
format: config.format,
|
||||
dimension: wgt::TextureDimension::D2,
|
||||
usage: config.usage,
|
||||
view_formats: config.view_formats,
|
||||
},
|
||||
hal_usage: conv::map_texture_usage(config.usage, config.format.into()),
|
||||
format_features: wgt::TextureFormatFeatures {
|
||||
allowed_usages: wgt::TextureUsages::RENDER_ATTACHMENT,
|
||||
flags: wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4
|
||||
| wgt::TextureFormatFeatureFlags::MULTISAMPLE_RESOLVE,
|
||||
},
|
||||
initialization_status: TextureInitTracker::new(1, 1),
|
||||
full_range: track::TextureSelector {
|
||||
layers: 0..1,
|
||||
mips: 0..1,
|
||||
},
|
||||
life_guard: LifeGuard::new("<Surface>"),
|
||||
clear_mode: resource::TextureClearMode::RenderPass {
|
||||
clear_views,
|
||||
is_color: true,
|
||||
},
|
||||
};
|
||||
|
||||
let ref_count = texture.life_guard.add_ref();
|
||||
let id = fid.assign(texture, &mut token);
|
||||
|
||||
{
|
||||
// register it in the device tracker as uninitialized
|
||||
let mut trackers = device.trackers.lock();
|
||||
trackers.textures.insert_single(
|
||||
id.0,
|
||||
ref_count.clone(),
|
||||
hal::TextureUses::UNINITIALIZED,
|
||||
);
|
||||
}
|
||||
|
||||
if present.acquired_texture.is_some() {
|
||||
return Err(SurfaceError::AlreadyAcquired);
|
||||
}
|
||||
present.acquired_texture = Some(Stored {
|
||||
value: id,
|
||||
ref_count,
|
||||
});
|
||||
|
||||
let status = if ast.suboptimal {
|
||||
Status::Suboptimal
|
||||
} else {
|
||||
Status::Good
|
||||
};
|
||||
(Some(id.0), status)
|
||||
}
|
||||
Ok(None) => (None, Status::Timeout),
|
||||
Err(err) => (
|
||||
None,
|
||||
match err {
|
||||
hal::SurfaceError::Lost => Status::Lost,
|
||||
hal::SurfaceError::Device(err) => {
|
||||
return Err(DeviceError::from(err).into());
|
||||
}
|
||||
hal::SurfaceError::Outdated => Status::Outdated,
|
||||
hal::SurfaceError::Other(msg) => {
|
||||
log::error!("acquire error: {}", msg);
|
||||
Status::Lost
|
||||
}
|
||||
},
|
||||
),
|
||||
};
|
||||
|
||||
Ok(SurfaceOutput { status, texture_id })
|
||||
}
|
||||
|
||||
pub fn surface_present<A: HalApi>(
|
||||
&self,
|
||||
surface_id: SurfaceId,
|
||||
) -> Result<Status, SurfaceError> {
|
||||
profiling::scope!("SwapChain::present");
|
||||
|
||||
let hub = A::hub(self);
|
||||
let mut token = Token::root();
|
||||
|
||||
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
|
||||
let surface = surface_guard
|
||||
.get_mut(surface_id)
|
||||
.map_err(|_| SurfaceError::Invalid)?;
|
||||
let (mut device_guard, mut token) = hub.devices.write(&mut token);
|
||||
|
||||
let present = match surface.presentation {
|
||||
Some(ref mut present) => present,
|
||||
None => return Err(SurfaceError::NotConfigured),
|
||||
};
|
||||
|
||||
let device = &mut device_guard[present.device_id.value];
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(ref trace) = device.trace {
|
||||
trace.lock().add(Action::Present(surface_id));
|
||||
}
|
||||
|
||||
let result = {
|
||||
let texture_id = present
|
||||
.acquired_texture
|
||||
.take()
|
||||
.ok_or(SurfaceError::AlreadyAcquired)?;
|
||||
|
||||
// The texture ID got added to the device tracker by `submit()`,
|
||||
// and now we are moving it away.
|
||||
log::debug!(
|
||||
"Removing swapchain texture {:?} from the device tracker",
|
||||
texture_id.value
|
||||
);
|
||||
device.trackers.lock().textures.remove(texture_id.value);
|
||||
|
||||
let (texture, _) = hub.textures.unregister(texture_id.value.0, &mut token);
|
||||
if let Some(texture) = texture {
|
||||
if let resource::TextureClearMode::RenderPass { clear_views, .. } =
|
||||
texture.clear_mode
|
||||
{
|
||||
for clear_view in clear_views {
|
||||
unsafe {
|
||||
hal::Device::destroy_texture_view(&device.raw, clear_view);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let suf = A::get_surface_mut(surface);
|
||||
match texture.inner {
|
||||
resource::TextureInner::Surface {
|
||||
raw,
|
||||
parent_id,
|
||||
has_work,
|
||||
} => {
|
||||
if surface_id != parent_id.0 {
|
||||
log::error!("Presented frame is from a different surface");
|
||||
Err(hal::SurfaceError::Lost)
|
||||
} else if !has_work {
|
||||
log::error!("No work has been submitted for this frame");
|
||||
unsafe { suf.unwrap().raw.discard_texture(raw) };
|
||||
Err(hal::SurfaceError::Outdated)
|
||||
} else {
|
||||
unsafe { device.queue.present(&mut suf.unwrap().raw, raw) }
|
||||
}
|
||||
}
|
||||
resource::TextureInner::Native { .. } => unreachable!(),
|
||||
}
|
||||
} else {
|
||||
Err(hal::SurfaceError::Outdated) //TODO?
|
||||
}
|
||||
};
|
||||
|
||||
log::debug!("Presented. End of Frame");
|
||||
|
||||
match result {
|
||||
Ok(()) => Ok(Status::Good),
|
||||
Err(err) => match err {
|
||||
hal::SurfaceError::Lost => Ok(Status::Lost),
|
||||
hal::SurfaceError::Device(err) => Err(SurfaceError::from(DeviceError::from(err))),
|
||||
hal::SurfaceError::Outdated => Ok(Status::Outdated),
|
||||
hal::SurfaceError::Other(msg) => {
|
||||
log::error!("acquire error: {}", msg);
|
||||
Err(SurfaceError::Invalid)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn surface_texture_discard<A: HalApi>(
|
||||
&self,
|
||||
surface_id: SurfaceId,
|
||||
) -> Result<(), SurfaceError> {
|
||||
profiling::scope!("SwapChain::discard");
|
||||
|
||||
let hub = A::hub(self);
|
||||
let mut token = Token::root();
|
||||
|
||||
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
|
||||
let surface = surface_guard
|
||||
.get_mut(surface_id)
|
||||
.map_err(|_| SurfaceError::Invalid)?;
|
||||
let (mut device_guard, mut token) = hub.devices.write(&mut token);
|
||||
|
||||
let present = match surface.presentation {
|
||||
Some(ref mut present) => present,
|
||||
None => return Err(SurfaceError::NotConfigured),
|
||||
};
|
||||
|
||||
let device = &mut device_guard[present.device_id.value];
|
||||
|
||||
#[cfg(feature = "trace")]
|
||||
if let Some(ref trace) = device.trace {
|
||||
trace.lock().add(Action::DiscardSurfaceTexture(surface_id));
|
||||
}
|
||||
|
||||
{
|
||||
let texture_id = present
|
||||
.acquired_texture
|
||||
.take()
|
||||
.ok_or(SurfaceError::AlreadyAcquired)?;
|
||||
|
||||
// The texture ID got added to the device tracker by `submit()`,
|
||||
// and now we are moving it away.
|
||||
device.trackers.lock().textures.remove(texture_id.value);
|
||||
|
||||
let (texture, _) = hub.textures.unregister(texture_id.value.0, &mut token);
|
||||
if let Some(texture) = texture {
|
||||
let suf = A::get_surface_mut(surface);
|
||||
match texture.inner {
|
||||
resource::TextureInner::Surface {
|
||||
raw,
|
||||
parent_id,
|
||||
has_work: _,
|
||||
} => {
|
||||
if surface_id == parent_id.0 {
|
||||
unsafe { suf.unwrap().raw.discard_texture(raw) };
|
||||
} else {
|
||||
log::warn!("Surface texture is outdated");
|
||||
}
|
||||
}
|
||||
resource::TextureInner::Native { .. } => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
200
third-party/vendor/wgpu-core/src/registry.rs
vendored
Normal file
200
third-party/vendor/wgpu-core/src/registry.rs
vendored
Normal file
|
|
@ -0,0 +1,200 @@
|
|||
use std::marker::PhantomData;
|
||||
|
||||
use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
use wgt::Backend;
|
||||
|
||||
use crate::{
|
||||
hub::{Access, Token},
|
||||
id,
|
||||
identity::{IdentityHandler, IdentityHandlerFactory},
|
||||
resource::Resource,
|
||||
storage::Storage,
|
||||
};
|
||||
|
||||
/// A lock-protected storage of resources of type `T`, keyed by ids of type
/// `I`, together with the identity filter that allocates/validates those ids.
#[derive(Debug)]
pub struct Registry<T: Resource, I: id::TypedId, F: IdentityHandlerFactory<I>> {
    // Allocates and recycles ids (behavior depends on the factory `F`).
    identity: F::Filter,
    // The actual resource storage, behind a read-write lock.
    pub(crate) data: RwLock<Storage<T, I>>,
    // Backend tag baked into every id this registry produces.
    backend: Backend,
}
|
||||
|
||||
impl<T: Resource, I: id::TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
    /// Create an empty registry whose ids carry the given `backend` tag.
    pub(crate) fn new(backend: Backend, factory: &F) -> Self {
        Self {
            identity: factory.spawn(),
            data: RwLock::new(Storage {
                map: Vec::new(),
                kind: T::TYPE,
                _phantom: PhantomData,
            }),
            backend,
        }
    }

    /// Create an empty registry for backend-agnostic resources; ids are
    /// tagged `Backend::Empty` and `kind` overrides the type-derived name.
    pub(crate) fn without_backend(factory: &F, kind: &'static str) -> Self {
        Self {
            identity: factory.spawn(),
            data: RwLock::new(Storage {
                map: Vec::new(),
                kind,
                _phantom: PhantomData,
            }),
            backend: Backend::Empty,
        }
    }
}
|
||||
|
||||
/// An id that has been reserved from a registry but whose resource has not
/// yet been stored; must be consumed by `assign`/`assign_error`/`into_id`.
#[must_use]
pub(crate) struct FutureId<'a, I: id::TypedId, T> {
    id: I,
    // The registry storage the id will be assigned into.
    data: &'a RwLock<Storage<T, I>>,
}
|
||||
|
||||
impl<I: id::TypedId + Copy, T> FutureId<'_, I, T> {
    /// Peek at the reserved id without consuming the `FutureId`
    /// (only needed when recording traces).
    #[cfg(feature = "trace")]
    pub fn id(&self) -> I {
        self.id
    }

    /// Consume the `FutureId`, yielding the reserved id without storing
    /// any resource under it.
    pub fn into_id(self) -> I {
        self.id
    }

    /// Store `value` under the reserved id and return it as a validated id.
    /// The token proves the caller may lock this registry's storage.
    pub fn assign<'a, A: Access<T>>(self, value: T, _: &'a mut Token<A>) -> id::Valid<I> {
        self.data.write().insert(self.id, value);
        id::Valid(self.id)
    }

    /// Record the reserved id as an error slot carrying `label`, so later
    /// lookups report a descriptive failure instead of a missing resource.
    pub fn assign_error<'a, A: Access<T>>(self, label: &str, _: &'a mut Token<A>) -> I {
        self.data.write().insert_error(self.id, label);
        self.id
    }
}
|
||||
|
||||
impl<T: Resource, I: id::TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
    /// Reserve an id (processed through the identity filter) for a resource
    /// that will be stored later via [`FutureId::assign`].
    pub(crate) fn prepare(
        &self,
        id_in: <F::Filter as IdentityHandler<I>>::Input,
    ) -> FutureId<I, T> {
        FutureId {
            id: self.identity.process(id_in, self.backend),
            data: &self.data,
        }
    }

    /// Acquire read access to this `Registry`'s contents.
    ///
    /// The caller must present a mutable reference to a `Token<A>`,
    /// for some type `A` that comes before this `Registry`'s resource
    /// type `T` in the lock ordering. A `Token<Root>` grants
    /// permission to lock any field; see [`Token::root`].
    ///
    /// Once the read lock is acquired, return a new `Token<T>`, along
    /// with a read guard for this `Registry`'s [`Storage`], which can
    /// be indexed by id to get at the actual resources.
    ///
    /// The borrow checker ensures that the caller cannot again access
    /// its `Token<A>` until it has dropped both the guard and the
    /// `Token<T>`.
    ///
    /// See the [`Hub`] type for more details on locking.
    ///
    /// [`Hub`]: crate::hub::Hub
    pub(crate) fn read<'a, A: Access<T>>(
        &'a self,
        _token: &'a mut Token<A>,
    ) -> (RwLockReadGuard<'a, Storage<T, I>>, Token<'a, T>) {
        (self.data.read(), Token::new())
    }

    /// Acquire write access to this `Registry`'s contents.
    ///
    /// The caller must present a mutable reference to a `Token<A>`,
    /// for some type `A` that comes before this `Registry`'s resource
    /// type `T` in the lock ordering. A `Token<Root>` grants
    /// permission to lock any field; see [`Token::root`].
    ///
    /// Once the lock is acquired, return a new `Token<T>`, along with
    /// a write guard for this `Registry`'s [`Storage`], which can be
    /// indexed by id to get at the actual resources.
    ///
    /// The borrow checker ensures that the caller cannot again access
    /// its `Token<A>` until it has dropped both the guard and the
    /// `Token<T>`.
    ///
    /// See the [`Hub`] type for more details on locking.
    ///
    /// [`Hub`]: crate::hub::Hub
    pub(crate) fn write<'a, A: Access<T>>(
        &'a self,
        _token: &'a mut Token<A>,
    ) -> (RwLockWriteGuard<'a, Storage<T, I>>, Token<'a, T>) {
        (self.data.write(), Token::new())
    }

    /// Unregister the resource at `id`.
    ///
    /// The caller must prove that it already holds a write lock for
    /// this `Registry` by passing a mutable reference to this
    /// `Registry`'s storage, obtained from the write guard returned
    /// by a previous call to [`write`], as the `guard` parameter.
    pub fn unregister_locked(&self, id: I, guard: &mut Storage<T, I>) -> Option<T> {
        let value = guard.remove(id);
        //Note: careful about the order here!
        self.identity.free(id);
        //Returning None is legal if it's an error ID
        value
    }

    /// Unregister the resource at `id` and return its value, if any.
    ///
    /// The caller must present a mutable reference to a `Token<A>`,
    /// for some type `A` that comes before this `Registry`'s resource
    /// type `T` in the lock ordering.
    ///
    /// This returns a `Token<T>`, but it's almost useless, because it
    /// doesn't return a lock guard to go with it: its only effect is
    /// to make the token you passed to this function inaccessible.
    /// However, the `Token<T>` can be used to satisfy some functions'
    /// bureaucratic expectations that you will have one available.
    ///
    /// The borrow checker ensures that the caller cannot again access
    /// its `Token<A>` until it has dropped both the guard and the
    /// `Token<T>`.
    ///
    /// See the [`Hub`] type for more details on locking.
    ///
    /// [`Hub`]: crate::hub::Hub
    pub(crate) fn unregister<'a, A: Access<T>>(
        &self,
        id: I,
        _token: &'a mut Token<A>,
    ) -> (Option<T>, Token<'a, T>) {
        let value = self.data.write().remove(id);
        //Note: careful about the order here!
        self.identity.free(id);
        //Returning None is legal if it's an error ID
        (value, Token::new())
    }

    /// A human-readable label for the resource at `id`, falling back to a
    /// `<Type-id>` placeholder (or the stored error label for invalid ids).
    pub fn label_for_resource(&self, id: I) -> String {
        let guard = self.data.read();

        let type_name = guard.kind;
        match guard.get(id) {
            Ok(res) => {
                let label = res.label();
                if label.is_empty() {
                    format!("<{}-{:?}>", type_name, id.unzip())
                } else {
                    label.to_string()
                }
            }
            Err(_) => format!(
                "<Invalid-{} label={}>",
                type_name,
                guard.label_for_invalid_id(id)
            ),
        }
    }
}
|
||||
854
third-party/vendor/wgpu-core/src/resource.rs
vendored
Normal file
854
third-party/vendor/wgpu-core/src/resource.rs
vendored
Normal file
|
|
@ -0,0 +1,854 @@
|
|||
use crate::{
|
||||
device::{DeviceError, HostMap, MissingDownlevelFlags, MissingFeatures},
|
||||
global::Global,
|
||||
hal_api::HalApi,
|
||||
hub::Token,
|
||||
id::{AdapterId, DeviceId, SurfaceId, TextureId, Valid},
|
||||
identity::GlobalIdentityHandlerFactory,
|
||||
init_tracker::{BufferInitTracker, TextureInitTracker},
|
||||
track::TextureSelector,
|
||||
validation::MissingBufferUsageError,
|
||||
Label, LifeGuard, RefCount, Stored,
|
||||
};
|
||||
|
||||
use smallvec::SmallVec;
|
||||
use thiserror::Error;
|
||||
|
||||
use std::{borrow::Borrow, ops::Range, ptr::NonNull};
|
||||
|
||||
/// Common interface for hub-tracked resources: a type name for diagnostics,
/// a life guard for reference tracking, and a (debug-only) label.
pub trait Resource {
    /// Human-readable resource type name used in labels and errors.
    const TYPE: &'static str;
    /// The guard that tracks references to this resource.
    fn life_guard(&self) -> &LifeGuard;
    /// The user-provided label; stored (and returned) only in debug builds,
    /// empty in release builds.
    fn label(&self) -> &str {
        #[cfg(debug_assertions)]
        return &self.life_guard().label;
        #[cfg(not(debug_assertions))]
        return "";
    }
}
|
||||
|
||||
/// The status code provided to the buffer mapping callback.
///
/// This is very similar to `BufferAccessResult`, except that this is FFI-friendly.
#[repr(C)]
#[derive(Debug)]
pub enum BufferMapAsyncStatus {
    /// The Buffer is successfully mapped, `get_mapped_range` can be called.
    ///
    /// All other variants of this enum represent failures to map the buffer.
    Success,
    /// The buffer is already mapped.
    ///
    /// While this is treated as an error, it does not prevent mapped range from being accessed.
    AlreadyMapped,
    /// Mapping was already requested.
    MapAlreadyPending,
    /// An unknown error.
    Error,
    /// Mapping was aborted (by unmapping or destroying the buffer before mapping
    /// happened).
    Aborted,
    /// The context is Lost.
    ContextLost,
    /// The buffer is in an invalid state.
    Invalid,
    /// The range isn't fully contained in the buffer.
    InvalidRange,
    /// The range isn't properly aligned.
    InvalidAlignment,
    /// Incompatible usage flags.
    InvalidUsageFlags,
}
|
||||
|
||||
/// The mapping lifecycle of a buffer, from creation-mapped through pending,
/// active, and unmapped states.
pub(crate) enum BufferMapState<A: hal::Api> {
    /// Mapped at creation.
    Init {
        // Host pointer to the mapped memory.
        ptr: NonNull<u8>,
        // Staging buffer used to upload the initial contents.
        stage_buffer: A::Buffer,
        // Whether the mapped range must be flushed before use.
        needs_flush: bool,
    },
    /// Waiting for GPU to be done before mapping
    Waiting(BufferPendingMapping),
    /// Mapped
    Active {
        // Host pointer to the mapped memory.
        ptr: NonNull<u8>,
        // The mapped byte range.
        range: hal::MemoryRange,
        // Whether the mapping is for reading or writing.
        host: HostMap,
    },
    /// Not mapped
    Idle,
}
|
||||
|
||||
// NOTE(review): `Send`/`Sync` are not auto-derived here, presumably because of
// the raw `NonNull<u8>` pointers in the `Init`/`Active` variants — confirm.
// On wasm these impls are only provided when the "fragile" feature opts in and
// the atomics target feature (threads) is disabled.
#[cfg(any(
    not(target_arch = "wasm32"),
    all(
        feature = "fragile-send-sync-non-atomic-wasm",
        not(target_feature = "atomics")
    )
))]
unsafe impl<A: hal::Api> Send for BufferMapState<A> {}
#[cfg(any(
    not(target_arch = "wasm32"),
    all(
        feature = "fragile-send-sync-non-atomic-wasm",
        not(target_feature = "atomics")
    )
))]
unsafe impl<A: hal::Api> Sync for BufferMapState<A> {}
|
||||
|
||||
/// FFI-friendly buffer-map callback: a C function pointer plus an opaque
/// user-data pointer passed back to it on invocation.
#[repr(C)]
pub struct BufferMapCallbackC {
    /// Function invoked with the mapping status when the map request resolves.
    pub callback: unsafe extern "C" fn(status: BufferMapAsyncStatus, user_data: *mut u8),
    /// Opaque pointer forwarded to `callback` unchanged.
    pub user_data: *mut u8,
}
|
||||
|
||||
// NOTE(review): `Send` is asserted manually because of the raw `user_data`
// pointer; the contract of `BufferMapCallback::from_c` requires both pointers
// to remain valid until invocation — confirm this covers cross-thread use.
#[cfg(any(
    not(target_arch = "wasm32"),
    all(
        feature = "fragile-send-sync-non-atomic-wasm",
        not(target_feature = "atomics")
    )
))]
unsafe impl Send for BufferMapCallbackC {}
|
||||
|
||||
/// A one-shot callback invoked when a buffer map request completes.
pub struct BufferMapCallback {
    // We wrap this so creating the enum in the C variant can be unsafe,
    // allowing our call function to be safe.
    // `None` only after `call` has consumed the callback; `Drop` panics if
    // the callback is dropped without ever being called.
    inner: Option<BufferMapCallbackInner>,
}
|
||||
|
||||
// The boxed Rust callback is `Send` on every target where the `Send`/`Sync`
// impls above are provided; on other wasm configurations it need not be.
#[cfg(any(
    not(target_arch = "wasm32"),
    all(
        feature = "fragile-send-sync-non-atomic-wasm",
        not(target_feature = "atomics")
    )
))]
type BufferMapCallbackCallback = Box<dyn FnOnce(BufferAccessResult) + Send + 'static>;
#[cfg(not(any(
    not(target_arch = "wasm32"),
    all(
        feature = "fragile-send-sync-non-atomic-wasm",
        not(target_feature = "atomics")
    )
)))]
type BufferMapCallbackCallback = Box<dyn FnOnce(BufferAccessResult) + 'static>;
|
||||
|
||||
/// Either a safe Rust closure or an unsafe C function-pointer callback.
enum BufferMapCallbackInner {
    Rust { callback: BufferMapCallbackCallback },
    C { inner: BufferMapCallbackC },
}
|
||||
|
||||
impl BufferMapCallback {
    /// Wrap a Rust closure as a map callback.
    pub fn from_rust(callback: BufferMapCallbackCallback) -> Self {
        Self {
            inner: Some(BufferMapCallbackInner::Rust { callback }),
        }
    }

    /// # Safety
    ///
    /// - The callback pointer must be valid to call with the provided user_data
    ///   pointer.
    ///
    /// - Both pointers must point to valid memory until the callback is
    ///   invoked, which may happen at an unspecified time.
    pub unsafe fn from_c(inner: BufferMapCallbackC) -> Self {
        Self {
            inner: Some(BufferMapCallbackInner::C { inner }),
        }
    }

    /// Consume the callback, invoking it with `result`.
    ///
    /// For the C variant, `result` is translated into the FFI-friendly
    /// [`BufferMapAsyncStatus`] before invocation.
    ///
    /// Panics if the callback was already invoked (inner is `None`).
    pub(crate) fn call(mut self, result: BufferAccessResult) {
        match self.inner.take() {
            Some(BufferMapCallbackInner::Rust { callback }) => {
                callback(result);
            }
            // SAFETY: the contract of the call to from_c says that this unsafe is sound.
            Some(BufferMapCallbackInner::C { inner }) => unsafe {
                // Map the rich Rust error type onto the flat C status enum;
                // unrecognized errors collapse to `Error`.
                let status = match result {
                    Ok(()) => BufferMapAsyncStatus::Success,
                    Err(BufferAccessError::Device(_)) => BufferMapAsyncStatus::ContextLost,
                    Err(BufferAccessError::Invalid) | Err(BufferAccessError::Destroyed) => {
                        BufferMapAsyncStatus::Invalid
                    }
                    Err(BufferAccessError::AlreadyMapped) => BufferMapAsyncStatus::AlreadyMapped,
                    Err(BufferAccessError::MapAlreadyPending) => {
                        BufferMapAsyncStatus::MapAlreadyPending
                    }
                    Err(BufferAccessError::MissingBufferUsage(_)) => {
                        BufferMapAsyncStatus::InvalidUsageFlags
                    }
                    Err(BufferAccessError::UnalignedRange)
                    | Err(BufferAccessError::UnalignedRangeSize { .. })
                    | Err(BufferAccessError::UnalignedOffset { .. }) => {
                        BufferMapAsyncStatus::InvalidAlignment
                    }
                    Err(BufferAccessError::OutOfBoundsUnderrun { .. })
                    | Err(BufferAccessError::OutOfBoundsOverrun { .. })
                    | Err(BufferAccessError::NegativeRange { .. }) => {
                        BufferMapAsyncStatus::InvalidRange
                    }
                    Err(_) => BufferMapAsyncStatus::Error,
                };

                (inner.callback)(status, inner.user_data);
            },
            None => {
                panic!("Map callback invoked twice");
            }
        }
    }
}
|
||||
|
||||
impl Drop for BufferMapCallback {
    /// Panic if the callback is dropped without ever being invoked —
    /// a leaked callback means a caller would never learn the map result.
    fn drop(&mut self) {
        if self.inner.is_some() {
            panic!("Map callback was leaked");
        }
    }
}
|
||||
|
||||
/// A pending buffer map request: the requested access mode plus the
/// callback to invoke when the request resolves.
pub struct BufferMapOperation {
    pub host: HostMap,
    pub callback: BufferMapCallback,
}
|
||||
|
||||
/// Errors that can occur when mapping or accessing a buffer.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum BufferAccessError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Buffer map failed")]
    Failed,
    #[error("Buffer is invalid")]
    Invalid,
    #[error("Buffer is destroyed")]
    Destroyed,
    #[error("Buffer is already mapped")]
    AlreadyMapped,
    #[error("Buffer map is pending")]
    MapAlreadyPending,
    #[error(transparent)]
    MissingBufferUsage(#[from] MissingBufferUsageError),
    #[error("Buffer is not mapped")]
    NotMapped,
    #[error(
        "Buffer map range must start aligned to `MAP_ALIGNMENT` and end to `COPY_BUFFER_ALIGNMENT`"
    )]
    UnalignedRange,
    #[error("Buffer offset invalid: offset {offset} must be multiple of 8")]
    UnalignedOffset { offset: wgt::BufferAddress },
    #[error("Buffer range size invalid: range_size {range_size} must be multiple of 4")]
    UnalignedRangeSize { range_size: wgt::BufferAddress },
    #[error("Buffer access out of bounds: index {index} would underrun the buffer (limit: {min})")]
    OutOfBoundsUnderrun {
        index: wgt::BufferAddress,
        min: wgt::BufferAddress,
    },
    #[error(
        "Buffer access out of bounds: last index {index} would overrun the buffer (limit: {max})"
    )]
    OutOfBoundsOverrun {
        index: wgt::BufferAddress,
        max: wgt::BufferAddress,
    },
    #[error("Buffer map range start {start} is greater than end {end}")]
    NegativeRange {
        start: wgt::BufferAddress,
        end: wgt::BufferAddress,
    },
    #[error("Buffer map aborted")]
    MapAborted,
}
|
||||
|
||||
/// Result of a buffer map/access operation. See [`BufferMapAsyncStatus`]
/// for the FFI-friendly equivalent.
pub type BufferAccessResult = Result<(), BufferAccessError>;
|
||||
/// A map request waiting for the GPU to finish using the buffer.
pub(crate) struct BufferPendingMapping {
    /// Byte range requested for mapping.
    pub range: Range<wgt::BufferAddress>,
    /// The map operation (access mode + callback) to perform once ready.
    pub op: BufferMapOperation,
    // hold the parent alive while the mapping is active
    pub _parent_ref_count: RefCount,
}
|
||||
|
||||
/// Describes a [`Buffer`]: the shared `wgt` descriptor specialized with this
/// crate's label type.
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
|
||||
|
||||
/// A GPU buffer resource tracked by the hub.
pub struct Buffer<A: hal::Api> {
    /// Underlying hal buffer; `None` once destroyed.
    pub(crate) raw: Option<A::Buffer>,
    /// Id of the owning device.
    pub(crate) device_id: Stored<DeviceId>,
    /// Usage flags the buffer was created with.
    pub(crate) usage: wgt::BufferUsages,
    /// Size in bytes.
    pub(crate) size: wgt::BufferAddress,
    /// Tracks which ranges have been zero-initialized.
    pub(crate) initialization_status: BufferInitTracker,
    /// Range needing a flush after a mapped write, if any.
    pub(crate) sync_mapped_writes: Option<hal::MemoryRange>,
    pub(crate) life_guard: LifeGuard,
    /// Current CPU-mapping state machine.
    pub(crate) map_state: BufferMapState<A>,
}
|
||||
|
||||
/// Errors that can occur when creating a [`Buffer`].
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateBufferError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Failed to map buffer while creating: {0}")]
    AccessError(#[from] BufferAccessError),
    #[error("Buffers that are mapped at creation have to be aligned to `COPY_BUFFER_ALIGNMENT`")]
    UnalignedSize,
    #[error("Invalid usage flags {0:?}")]
    InvalidUsage(wgt::BufferUsages),
    #[error("`MAP` usage can only be combined with the opposite `COPY`, requested {0:?}")]
    UsageMismatch(wgt::BufferUsages),
    #[error("Buffer size {requested} is greater than the maximum buffer size ({maximum})")]
    MaxBufferSize { requested: u64, maximum: u64 },
    #[error(transparent)]
    MissingDownlevelFlags(#[from] MissingDownlevelFlags),
}
|
||||
|
||||
impl<A: hal::Api> Resource for Buffer<A> {
    const TYPE: &'static str = "Buffer";

    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
|
||||
|
||||
/// A temporary buffer, consumed by the command that uses it.
///
/// A [`StagingBuffer`] is designed for one-shot uploads of data to the GPU. It
/// is always created mapped, and the command that uses it destroys the buffer
/// when it is done.
///
/// [`StagingBuffer`]s can be created with [`queue_create_staging_buffer`] and
/// used with [`queue_write_staging_buffer`]. They are also used internally by
/// operations like [`queue_write_texture`] that need to upload data to the GPU,
/// but that don't belong to any particular wgpu command buffer.
///
/// Used `StagingBuffer`s are accumulated in [`Device::pending_writes`], to be
/// freed once their associated operation's queue submission has finished
/// execution.
///
/// [`queue_create_staging_buffer`]: Global::queue_create_staging_buffer
/// [`queue_write_staging_buffer`]: Global::queue_write_staging_buffer
/// [`queue_write_texture`]: Global::queue_write_texture
/// [`Device::pending_writes`]: crate::device::Device
pub struct StagingBuffer<A: hal::Api> {
    /// Underlying hal buffer.
    pub(crate) raw: A::Buffer,
    /// Size in bytes.
    pub(crate) size: wgt::BufferAddress,
    /// Whether the memory is coherent, i.e. no explicit flush is needed.
    pub(crate) is_coherent: bool,
}
|
||||
|
||||
impl<A: hal::Api> Resource for StagingBuffer<A> {
    const TYPE: &'static str = "StagingBuffer";

    /// Staging buffers carry no `LifeGuard`; callers must never request one.
    fn life_guard(&self) -> &LifeGuard {
        unreachable!()
    }

    /// Fixed label — overridden so the default impl never touches the
    /// (nonexistent) life guard.
    fn label(&self) -> &str {
        "<StagingBuffer>"
    }
}
|
||||
|
||||
/// Describes a [`Texture`], including the list of allowed view formats.
pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>, Vec<wgt::TextureFormat>>;
|
||||
|
||||
/// The backing storage of a [`Texture`]: either a regular hal texture, or a
/// texture acquired from a surface (swapchain).
#[derive(Debug)]
pub(crate) enum TextureInner<A: hal::Api> {
    Native {
        /// `None` once the texture has been destroyed.
        raw: Option<A::Texture>,
    },
    Surface {
        raw: A::SurfaceTexture,
        /// The surface this texture was acquired from.
        parent_id: Valid<SurfaceId>,
        /// Whether any GPU work has targeted this surface texture yet.
        has_work: bool,
    },
}
|
||||
|
||||
impl<A: hal::Api> TextureInner<A> {
|
||||
pub fn as_raw(&self) -> Option<&A::Texture> {
|
||||
match *self {
|
||||
Self::Native { raw: Some(ref tex) } => Some(tex),
|
||||
Self::Native { raw: None } => None,
|
||||
Self::Surface { ref raw, .. } => Some(raw.borrow()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// How a [`Texture`] can be cleared by the clear-texture machinery.
#[derive(Debug)]
pub enum TextureClearMode<A: hal::Api> {
    /// Cleared by copying from a zeroed buffer.
    BufferCopy,
    // View for clear via RenderPass for every subsurface (mip/layer/slice)
    RenderPass {
        clear_views: SmallVec<[A::TextureView; 1]>,
        is_color: bool,
    },
    // Texture can't be cleared, attempting to do so will cause panic.
    // (either because it is impossible for the type of texture or it is being destroyed)
    None,
}
|
||||
|
||||
/// A GPU texture resource tracked by the hub.
#[derive(Debug)]
pub struct Texture<A: hal::Api> {
    /// Backing storage (native texture or surface texture).
    pub(crate) inner: TextureInner<A>,
    /// Id of the owning device.
    pub(crate) device_id: Stored<DeviceId>,
    /// Creation descriptor, with the label stripped.
    pub(crate) desc: wgt::TextureDescriptor<(), Vec<wgt::TextureFormat>>,
    /// hal-level usage flags derived from the descriptor.
    pub(crate) hal_usage: hal::TextureUses,
    /// Features supported for this texture's format on this device.
    pub(crate) format_features: wgt::TextureFormatFeatures,
    /// Tracks which subresources have been initialized.
    pub(crate) initialization_status: TextureInitTracker,
    /// Selector covering every mip level and layer of the texture.
    pub(crate) full_range: TextureSelector,
    pub(crate) life_guard: LifeGuard,
    /// How this texture is cleared, if at all.
    pub(crate) clear_mode: TextureClearMode<A>,
}
|
||||
|
||||
impl<A: hal::Api> Texture<A> {
    /// Look up the pre-created clear view for the given mip level and
    /// depth-slice/array-layer.
    ///
    /// Only valid for textures whose `clear_mode` is
    /// [`TextureClearMode::RenderPass`]; panics otherwise.
    pub(crate) fn get_clear_view(&self, mip_level: u32, depth_or_layer: u32) -> &A::TextureView {
        match self.clear_mode {
            TextureClearMode::BufferCopy => {
                panic!("Given texture is cleared with buffer copies, not render passes")
            }
            TextureClearMode::None => {
                panic!("Given texture can't be cleared")
            }
            TextureClearMode::RenderPass {
                ref clear_views, ..
            } => {
                // `clear_views` is a flat array ordered by mip level, then
                // depth-slice/layer within each mip.
                let index = if self.desc.dimension == wgt::TextureDimension::D3 {
                    // 3D textures: each mip `m` contributes
                    // `max(depth >> m, 1)` depth slices, so sum the slice
                    // counts of all preceding mips.
                    (0..mip_level).fold(0, |acc, mip| {
                        acc + (self.desc.size.depth_or_array_layers >> mip).max(1)
                    })
                } else {
                    // Non-3D textures: every mip has the same layer count.
                    mip_level * self.desc.size.depth_or_array_layers
                } + depth_or_layer;
                &clear_views[index as usize]
            }
        }
    }
}
|
||||
|
||||
impl<G: GlobalIdentityHandlerFactory> Global<G> {
    /// Run `hal_texture_callback` with the raw hal texture behind `id`, or
    /// `None` if the id is invalid or the texture was destroyed.
    ///
    /// # Safety
    ///
    /// - The raw texture handle must not be manually destroyed
    pub unsafe fn texture_as_hal<A: HalApi, F: FnOnce(Option<&A::Texture>)>(
        &self,
        id: TextureId,
        hal_texture_callback: F,
    ) {
        profiling::scope!("Texture::as_hal");

        let hub = A::hub(self);
        let mut token = Token::root();
        let (guard, _) = hub.textures.read(&mut token);
        // `try_get` tolerates ids from a different backend/hub; both a
        // missing entry and a destroyed texture yield `None`.
        let texture = guard.try_get(id).ok().flatten();
        let hal_texture = texture.and_then(|tex| tex.inner.as_raw());

        hal_texture_callback(hal_texture);
    }

    /// Run `hal_adapter_callback` with the raw hal adapter behind `id`, or
    /// `None` if the id is invalid.
    ///
    /// # Safety
    ///
    /// - The raw adapter handle must not be manually destroyed
    pub unsafe fn adapter_as_hal<A: HalApi, F: FnOnce(Option<&A::Adapter>) -> R, R>(
        &self,
        id: AdapterId,
        hal_adapter_callback: F,
    ) -> R {
        profiling::scope!("Adapter::as_hal");

        let hub = A::hub(self);
        let mut token = Token::root();

        let (guard, _) = hub.adapters.read(&mut token);
        let adapter = guard.try_get(id).ok().flatten();
        let hal_adapter = adapter.map(|adapter| &adapter.raw.adapter);

        hal_adapter_callback(hal_adapter)
    }

    /// Run `hal_device_callback` with the raw hal device behind `id`, or
    /// `None` if the id is invalid.
    ///
    /// # Safety
    ///
    /// - The raw device handle must not be manually destroyed
    pub unsafe fn device_as_hal<A: HalApi, F: FnOnce(Option<&A::Device>) -> R, R>(
        &self,
        id: DeviceId,
        hal_device_callback: F,
    ) -> R {
        profiling::scope!("Device::as_hal");

        let hub = A::hub(self);
        let mut token = Token::root();
        let (guard, _) = hub.devices.read(&mut token);
        let device = guard.try_get(id).ok().flatten();
        let hal_device = device.map(|device| &device.raw);

        hal_device_callback(hal_device)
    }

    /// Run `hal_surface_callback` with a mutable reference to the raw hal
    /// surface behind `id`, or `None` if the id is invalid or the surface
    /// has no state for backend `A`.
    ///
    /// # Safety
    /// - The raw surface handle must not be manually destroyed
    pub unsafe fn surface_as_hal_mut<A: HalApi, F: FnOnce(Option<&mut A::Surface>) -> R, R>(
        &self,
        id: SurfaceId,
        hal_surface_callback: F,
    ) -> R {
        profiling::scope!("Surface::as_hal_mut");

        let mut token = Token::root();
        let (mut guard, _) = self.surfaces.write(&mut token);
        let surface = guard.get_mut(id).ok();
        let hal_surface = surface
            .and_then(|surface| A::get_surface_mut(surface))
            .map(|surface| &mut surface.raw);

        hal_surface_callback(hal_surface)
    }
}
|
||||
|
||||
/// Which axis of a texture extent an error refers to.
#[derive(Clone, Copy, Debug)]
pub enum TextureErrorDimension {
    X,
    Y,
    Z,
}
|
||||
|
||||
/// Errors in the extent/sample-count portion of a texture descriptor.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum TextureDimensionError {
    #[error("Dimension {0:?} is zero")]
    Zero(TextureErrorDimension),
    #[error("Dimension {dim:?} value {given} exceeds the limit of {limit}")]
    LimitExceeded {
        dim: TextureErrorDimension,
        given: u32,
        limit: u32,
    },
    #[error("Sample count {0} is invalid")]
    InvalidSampleCount(u32),
    #[error("Width {width} is not a multiple of {format:?}'s block width ({block_width})")]
    NotMultipleOfBlockWidth {
        width: u32,
        block_width: u32,
        format: wgt::TextureFormat,
    },
    #[error("Height {height} is not a multiple of {format:?}'s block height ({block_height})")]
    NotMultipleOfBlockHeight {
        height: u32,
        block_height: u32,
        format: wgt::TextureFormat,
    },
    #[error("Multisampled texture depth or array layers must be 1, got {0}")]
    MultisampledDepthOrArrayLayer(u32),
}
|
||||
|
||||
/// Errors that can occur when creating a [`Texture`].
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateTextureError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Invalid usage flags {0:?}")]
    InvalidUsage(wgt::TextureUsages),
    #[error(transparent)]
    InvalidDimension(#[from] TextureDimensionError),
    #[error("Depth texture ({1:?}) can't be created as {0:?}")]
    InvalidDepthDimension(wgt::TextureDimension, wgt::TextureFormat),
    #[error("Compressed texture ({1:?}) can't be created as {0:?}")]
    InvalidCompressedDimension(wgt::TextureDimension, wgt::TextureFormat),
    #[error(
        "Texture descriptor mip level count {requested} is invalid, maximum allowed is {maximum}"
    )]
    InvalidMipLevelCount { requested: u32, maximum: u32 },
    // The third field flags whether the restriction came from downlevel limits.
    #[error(
        "Texture usages {0:?} are not allowed on a texture of type {1:?}{}",
        if *.2 { " due to downlevel restrictions" } else { "" }
    )]
    InvalidFormatUsages(wgt::TextureUsages, wgt::TextureFormat, bool),
    #[error("The view format {0:?} is not compatible with texture format {1:?}, only changing srgb-ness is allowed.")]
    InvalidViewFormat(wgt::TextureFormat, wgt::TextureFormat),
    #[error("Texture usages {0:?} are not allowed on a texture of dimensions {1:?}")]
    InvalidDimensionUsages(wgt::TextureUsages, wgt::TextureDimension),
    #[error("Texture usage STORAGE_BINDING is not allowed for multisampled textures")]
    InvalidMultisampledStorageBinding,
    #[error("Format {0:?} does not support multisampling")]
    InvalidMultisampledFormat(wgt::TextureFormat),
    #[error("Sample count {0} is not supported by format {1:?} on this device. It may be supported by your adapter through the TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES feature.")]
    InvalidSampleCount(u32, wgt::TextureFormat),
    #[error("Multisampled textures must have RENDER_ATTACHMENT usage")]
    MultisampledNotRenderAttachment,
    #[error("Texture format {0:?} can't be used due to missing features")]
    MissingFeatures(wgt::TextureFormat, #[source] MissingFeatures),
    #[error(transparent)]
    MissingDownlevelFlags(#[from] MissingDownlevelFlags),
}
|
||||
|
||||
impl<A: hal::Api> Resource for Texture<A> {
    const TYPE: &'static str = "Texture";

    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
|
||||
|
||||
/// Lets the resource trackers view a texture as its full subresource range.
impl<A: hal::Api> Borrow<TextureSelector> for Texture<A> {
    fn borrow(&self) -> &TextureSelector {
        &self.full_range
    }
}
|
||||
|
||||
/// Describes a [`TextureView`].
#[derive(Clone, Debug, Default, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize), serde(default))]
pub struct TextureViewDescriptor<'a> {
    /// Debug label of the texture view.
    ///
    /// This will show up in graphics debuggers for easy identification.
    pub label: Label<'a>,
    /// Format of the texture view, or `None` for the same format as the texture
    /// itself.
    ///
    /// At this time, it must be the same the underlying format of the texture.
    pub dimension: Option<wgt::TextureViewDimension>,
    /// The dimension of the texture view.
    ///
    /// - For 1D textures, this must be `D1`.
    /// - For 2D textures it must be one of `D2`, `D2Array`, `Cube`, or `CubeArray`.
    /// - For 3D textures it must be `D3`.
    ///
    /// `None` selects a dimension inferred from the texture.
    pub format: Option<wgt::TextureFormat>,
    /// Range within the texture that is accessible via this view.
    pub range: wgt::ImageSubresourceRange,
}
|
||||
|
||||
/// A [`TextureViewDescriptor`] with every optional field resolved to a
/// concrete value.
#[derive(Debug)]
pub(crate) struct HalTextureViewDescriptor {
    pub format: wgt::TextureFormat,
    pub dimension: wgt::TextureViewDimension,
    pub range: wgt::ImageSubresourceRange,
}
|
||||
|
||||
impl HalTextureViewDescriptor {
    /// The format aspects selected by this view's format and aspect range.
    pub fn aspects(&self) -> hal::FormatAspects {
        hal::FormatAspects::new(self.format, self.range.aspect)
    }
}
|
||||
|
||||
/// Why a texture view cannot be used as a render attachment.
#[derive(Debug, Copy, Clone, Error)]
pub enum TextureViewNotRenderableReason {
    #[error("The texture this view references doesn't include the RENDER_ATTACHMENT usage. Provided usages: {0:?}")]
    Usage(wgt::TextureUsages),
    #[error("The dimension of this texture view is not 2D. View dimension: {0:?}")]
    Dimension(wgt::TextureViewDimension),
    #[error("This texture view has more than one mipmap level. View mipmap levels: {0:?}")]
    MipLevelCount(u32),
    #[error("This texture view has more than one array layer. View array layers: {0:?}")]
    ArrayLayerCount(u32),
    #[error(
        "The aspects of this texture view are a subset of the aspects in the original texture. Aspects: {0:?}"
    )]
    Aspects(hal::FormatAspects),
}
|
||||
|
||||
/// A view into a [`Texture`], tracked by the hub.
#[derive(Debug)]
pub struct TextureView<A: hal::Api> {
    pub(crate) raw: A::TextureView,
    // The parent's refcount is held alive, but the parent may still be deleted
    // if it's a surface texture. TODO: make this cleaner.
    pub(crate) parent_id: Stored<TextureId>,
    pub(crate) device_id: Stored<DeviceId>,
    //TODO: store device_id for quick access?
    /// Fully-resolved view descriptor.
    pub(crate) desc: HalTextureViewDescriptor,
    pub(crate) format_features: wgt::TextureFormatFeatures,
    /// This is `Err` only if the texture view is not renderable
    pub(crate) render_extent: Result<wgt::Extent3d, TextureViewNotRenderableReason>,
    /// Sample count inherited from the parent texture.
    pub(crate) samples: u32,
    /// The subresources (mips/layers) of the parent texture this view covers.
    pub(crate) selector: TextureSelector,
    pub(crate) life_guard: LifeGuard,
}
|
||||
|
||||
/// Errors that can occur when creating a [`TextureView`].
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateTextureViewError {
    #[error("Parent texture is invalid or destroyed")]
    InvalidTexture,
    #[error("Not enough memory left")]
    OutOfMemory,
    #[error("Invalid texture view dimension `{view:?}` with texture of dimension `{texture:?}`")]
    InvalidTextureViewDimension {
        view: wgt::TextureViewDimension,
        texture: wgt::TextureDimension,
    },
    #[error("Invalid texture view dimension `{0:?}` of a multisampled texture")]
    InvalidMultisampledTextureViewDimension(wgt::TextureViewDimension),
    #[error("Invalid texture depth `{depth}` for texture view of dimension `Cubemap`. Cubemap views must use images of size 6.")]
    InvalidCubemapTextureDepth { depth: u32 },
    #[error("Invalid texture depth `{depth}` for texture view of dimension `CubemapArray`. Cubemap views must use images with sizes which are a multiple of 6.")]
    InvalidCubemapArrayTextureDepth { depth: u32 },
    #[error("Source texture width and height must be equal for a texture view of dimension `Cube`/`CubeArray`")]
    InvalidCubeTextureViewSize,
    #[error("Mip level count is 0")]
    ZeroMipLevelCount,
    #[error("Array layer count is 0")]
    ZeroArrayLayerCount,
    #[error(
        "TextureView mip level count + base mip level {requested} must be <= Texture mip level count {total}"
    )]
    TooManyMipLevels { requested: u32, total: u32 },
    #[error("TextureView array layer count + base array layer {requested} must be <= Texture depth/array layer count {total}")]
    TooManyArrayLayers { requested: u32, total: u32 },
    #[error("Requested array layer count {requested} is not valid for the target view dimension {dim:?}")]
    InvalidArrayLayerCount {
        requested: u32,
        dim: wgt::TextureViewDimension,
    },
    #[error("Aspect {requested_aspect:?} is not in the source texture format {texture_format:?}")]
    InvalidAspect {
        texture_format: wgt::TextureFormat,
        requested_aspect: wgt::TextureAspect,
    },
    #[error("Unable to view texture {texture:?} as {view:?}")]
    FormatReinterpretation {
        texture: wgt::TextureFormat,
        view: wgt::TextureFormat,
    },
}
|
||||
|
||||
/// Errors when destroying a texture view. Currently uninhabited: destroying
/// a view cannot fail, but the type is kept for API symmetry/extension.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum TextureViewDestroyError {}
|
||||
|
||||
impl<A: hal::Api> Resource for TextureView<A> {
    const TYPE: &'static str = "TextureView";

    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
|
||||
|
||||
/// Describes a [`Sampler`]
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct SamplerDescriptor<'a> {
    /// Debug label of the sampler.
    ///
    /// This will show up in graphics debuggers for easy identification.
    pub label: Label<'a>,
    /// How to deal with out-of-bounds accesses in the u, v, and w
    /// (i.e. x, y, and z) directions, in that order.
    pub address_modes: [wgt::AddressMode; 3],
    /// How to filter the texture when it needs to be magnified (made larger)
    pub mag_filter: wgt::FilterMode,
    /// How to filter the texture when it needs to be minified (made smaller)
    pub min_filter: wgt::FilterMode,
    /// How to filter between mip map levels
    pub mipmap_filter: wgt::FilterMode,
    /// Minimum level of detail (i.e. mip level) to use
    pub lod_min_clamp: f32,
    /// Maximum level of detail (i.e. mip level) to use
    pub lod_max_clamp: f32,
    /// If this is enabled, this is a comparison sampler using the given comparison function.
    pub compare: Option<wgt::CompareFunction>,
    /// Must be at least 1. If this is not 1, all filter modes must be linear.
    pub anisotropy_clamp: u16,
    /// Border color to use when address_mode is
    /// [`AddressMode::ClampToBorder`](wgt::AddressMode::ClampToBorder)
    pub border_color: Option<wgt::SamplerBorderColor>,
}
|
||||
|
||||
/// A GPU sampler resource tracked by the hub.
#[derive(Debug)]
pub struct Sampler<A: hal::Api> {
    pub(crate) raw: A::Sampler,
    /// Id of the owning device.
    pub(crate) device_id: Stored<DeviceId>,
    pub(crate) life_guard: LifeGuard,
    /// `true` if this is a comparison sampler
    pub(crate) comparison: bool,
    /// `true` if this is a filtering sampler
    pub(crate) filtering: bool,
}
|
||||
|
||||
/// Which sampler filter field an error message refers to.
#[derive(Copy, Clone)]
pub enum SamplerFilterErrorType {
    MagFilter,
    MinFilter,
    MipmapFilter,
}
|
||||
|
||||
impl std::fmt::Debug for SamplerFilterErrorType {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match *self {
|
||||
SamplerFilterErrorType::MagFilter => write!(f, "magFilter"),
|
||||
SamplerFilterErrorType::MinFilter => write!(f, "minFilter"),
|
||||
SamplerFilterErrorType::MipmapFilter => write!(f, "mipmapFilter"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors that can occur when creating a [`Sampler`].
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateSamplerError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Invalid lodMinClamp: {0}. Must be greater or equal to 0.0")]
    InvalidLodMinClamp(f32),
    #[error("Invalid lodMaxClamp: {lod_max_clamp}. Must be greater or equal to lodMinClamp (which is {lod_min_clamp}).")]
    InvalidLodMaxClamp {
        lod_min_clamp: f32,
        lod_max_clamp: f32,
    },
    #[error("Invalid anisotropic clamp: {0}. Must be at least 1.")]
    InvalidAnisotropy(u16),
    #[error("Invalid filter mode for {filter_type:?}: {filter_mode:?}. When anistropic clamp is not 1 (it is {anisotropic_clamp}), all filter modes must be linear.")]
    InvalidFilterModeWithAnisotropy {
        filter_type: SamplerFilterErrorType,
        filter_mode: wgt::FilterMode,
        anisotropic_clamp: u16,
    },
    #[error("Cannot create any more samplers")]
    TooManyObjects,
    /// AddressMode::ClampToBorder requires feature ADDRESS_MODE_CLAMP_TO_BORDER.
    #[error(transparent)]
    MissingFeatures(#[from] MissingFeatures),
}
|
||||
|
||||
impl<A: hal::Api> Resource for Sampler<A> {
    const TYPE: &'static str = "Sampler";

    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
|
||||
|
||||
/// Errors that can occur when creating a [`QuerySet`].
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateQuerySetError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("QuerySets cannot be made with zero queries")]
    ZeroCount,
    #[error("{count} is too many queries for a single QuerySet. QuerySets cannot be made more than {maximum} queries.")]
    TooManyQueries { count: u32, maximum: u32 },
    #[error(transparent)]
    MissingFeatures(#[from] MissingFeatures),
}
|
||||
|
||||
/// Describes a [`QuerySet`]: the shared `wgt` descriptor specialized with
/// this crate's label type.
pub type QuerySetDescriptor<'a> = wgt::QuerySetDescriptor<Label<'a>>;
|
||||
|
||||
/// A GPU query set resource tracked by the hub.
#[derive(Debug)]
pub struct QuerySet<A: hal::Api> {
    pub(crate) raw: A::QuerySet,
    /// Id of the owning device.
    pub(crate) device_id: Stored<DeviceId>,
    pub(crate) life_guard: LifeGuard,
    /// Creation descriptor, with the label stripped.
    pub(crate) desc: wgt::QuerySetDescriptor<()>,
}
|
||||
|
||||
impl<A: hal::Api> Resource for QuerySet<A> {
    const TYPE: &'static str = "QuerySet";

    fn life_guard(&self) -> &LifeGuard {
        &self.life_guard
    }
}
|
||||
|
||||
/// Errors when explicitly destroying a resource.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum DestroyError {
    #[error("Resource is invalid")]
    Invalid,
    #[error("Resource is already destroyed")]
    AlreadyDestroyed,
}
|
||||
234
third-party/vendor/wgpu-core/src/storage.rs
vendored
Normal file
234
third-party/vendor/wgpu-core/src/storage.rs
vendored
Normal file
|
|
@ -0,0 +1,234 @@
|
|||
use std::{marker::PhantomData, mem, ops};
|
||||
|
||||
use wgt::Backend;
|
||||
|
||||
use crate::{id, Epoch, Index};
|
||||
|
||||
/// An entry in a `Storage::map` table.
#[derive(Debug)]
pub(crate) enum Element<T> {
    /// There are no live ids with this index.
    Vacant,

    /// There is one live id with this index, allocated at the given
    /// epoch.
    Occupied(T, Epoch),

    /// Like `Occupied`, but an error occurred when creating the
    /// resource.
    ///
    /// The given `String` is the resource's descriptor label.
    Error(Epoch, String),
}
|
||||
|
||||
/// Summary of the occupancy of one `Storage` table, for diagnostics.
#[derive(Clone, Debug, Default)]
pub struct StorageReport {
    /// Count of `Element::Occupied` entries.
    pub num_occupied: usize,
    /// Count of `Element::Vacant` entries.
    pub num_vacant: usize,
    /// Count of `Element::Error` entries.
    pub num_error: usize,
    /// Size in bytes of a single `Element<T>`.
    pub element_size: usize,
}
|
||||
|
||||
impl StorageReport {
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.num_occupied + self.num_vacant + self.num_error == 0
|
||||
}
|
||||
}
|
||||
|
||||
/// Marker error: the id's index or epoch does not match a live entry.
#[derive(Clone, Debug)]
pub(crate) struct InvalidId;
|
||||
|
||||
/// A table of `T` values indexed by the id type `I`.
///
/// The table is represented as a vector indexed by the ids' index
/// values, so you should use an id allocator like `IdentityManager`
/// that keeps the index values dense and close to zero.
#[derive(Debug)]
pub struct Storage<T, I: id::TypedId> {
    pub(crate) map: Vec<Element<T>>,
    /// Resource-kind name (e.g. "Buffer"), used in panic/assert messages.
    pub(crate) kind: &'static str,
    /// Ties the table to its id type without storing an `I`.
    pub(crate) _phantom: PhantomData<I>,
}
|
||||
|
||||
/// Indexing by a pre-validated id; unwraps because `id::Valid` promises
/// the entry exists.
impl<T, I: id::TypedId> ops::Index<id::Valid<I>> for Storage<T, I> {
    type Output = T;
    fn index(&self, id: id::Valid<I>) -> &T {
        self.get(id.0).unwrap()
    }
}
|
||||
|
||||
/// Mutable indexing by a pre-validated id; see the `Index` impl above.
impl<T, I: id::TypedId> ops::IndexMut<id::Valid<I>> for Storage<T, I> {
    fn index_mut(&mut self, id: id::Valid<I>) -> &mut T {
        self.get_mut(id.0).unwrap()
    }
}
|
||||
|
||||
impl<T, I: id::TypedId> Storage<T, I> {
|
||||
    /// Whether `id` refers to a live entry: the slot at its index exists,
    /// is not vacant, and was allocated at the same epoch as the id.
    /// (An `Error` entry with a matching epoch counts as contained.)
    pub(crate) fn contains(&self, id: I) -> bool {
        let (index, epoch, _) = id.unzip();
        match self.map.get(index as usize) {
            Some(&Element::Vacant) => false,
            Some(&Element::Occupied(_, storage_epoch) | &Element::Error(storage_epoch, _)) => {
                storage_epoch == epoch
            }
            None => false,
        }
    }
|
||||
|
||||
/// Attempts to get a reference to an item behind a potentially invalid ID.
|
||||
///
|
||||
/// Returns [`None`] if there is an epoch mismatch, or the entry is empty.
|
||||
///
|
||||
/// This function is primarily intended for the `as_hal` family of functions
|
||||
/// where you may need to fallibly get a object backed by an id that could
|
||||
/// be in a different hub.
|
||||
pub(crate) fn try_get(&self, id: I) -> Result<Option<&T>, InvalidId> {
|
||||
let (index, epoch, _) = id.unzip();
|
||||
let (result, storage_epoch) = match self.map.get(index as usize) {
|
||||
Some(&Element::Occupied(ref v, epoch)) => (Ok(Some(v)), epoch),
|
||||
Some(&Element::Vacant) => return Ok(None),
|
||||
Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch),
|
||||
None => return Err(InvalidId),
|
||||
};
|
||||
assert_eq!(
|
||||
epoch, storage_epoch,
|
||||
"{}[{}] is no longer alive",
|
||||
self.kind, index
|
||||
);
|
||||
result
|
||||
}
|
||||
|
||||
/// Get a reference to an item behind a potentially invalid ID.
|
||||
/// Panics if there is an epoch mismatch, or the entry is empty.
|
||||
pub(crate) fn get(&self, id: I) -> Result<&T, InvalidId> {
|
||||
let (index, epoch, _) = id.unzip();
|
||||
let (result, storage_epoch) = match self.map.get(index as usize) {
|
||||
Some(&Element::Occupied(ref v, epoch)) => (Ok(v), epoch),
|
||||
Some(&Element::Vacant) => panic!("{}[{}] does not exist", self.kind, index),
|
||||
Some(&Element::Error(epoch, ..)) => (Err(InvalidId), epoch),
|
||||
None => return Err(InvalidId),
|
||||
};
|
||||
assert_eq!(
|
||||
epoch, storage_epoch,
|
||||
"{}[{}] is no longer alive",
|
||||
self.kind, index
|
||||
);
|
||||
result
|
||||
}
|
||||
|
||||
/// Get a mutable reference to an item behind a potentially invalid ID.
|
||||
/// Panics if there is an epoch mismatch, or the entry is empty.
|
||||
pub(crate) fn get_mut(&mut self, id: I) -> Result<&mut T, InvalidId> {
|
||||
let (index, epoch, _) = id.unzip();
|
||||
let (result, storage_epoch) = match self.map.get_mut(index as usize) {
|
||||
Some(&mut Element::Occupied(ref mut v, epoch)) => (Ok(v), epoch),
|
||||
Some(&mut Element::Vacant) | None => panic!("{}[{}] does not exist", self.kind, index),
|
||||
Some(&mut Element::Error(epoch, ..)) => (Err(InvalidId), epoch),
|
||||
};
|
||||
assert_eq!(
|
||||
epoch, storage_epoch,
|
||||
"{}[{}] is no longer alive",
|
||||
self.kind, index
|
||||
);
|
||||
result
|
||||
}
|
||||
|
||||
pub(crate) unsafe fn get_unchecked(&self, id: u32) -> &T {
|
||||
match self.map[id as usize] {
|
||||
Element::Occupied(ref v, _) => v,
|
||||
Element::Vacant => panic!("{}[{}] does not exist", self.kind, id),
|
||||
Element::Error(_, _) => panic!(""),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn label_for_invalid_id(&self, id: I) -> &str {
|
||||
let (index, _, _) = id.unzip();
|
||||
match self.map.get(index as usize) {
|
||||
Some(&Element::Error(_, ref label)) => label,
|
||||
_ => "",
|
||||
}
|
||||
}
|
||||
|
||||
fn insert_impl(&mut self, index: usize, element: Element<T>) {
|
||||
if index >= self.map.len() {
|
||||
self.map.resize_with(index + 1, || Element::Vacant);
|
||||
}
|
||||
match std::mem::replace(&mut self.map[index], element) {
|
||||
Element::Vacant => {}
|
||||
_ => panic!("Index {index:?} is already occupied"),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn insert(&mut self, id: I, value: T) {
|
||||
let (index, epoch, _) = id.unzip();
|
||||
self.insert_impl(index as usize, Element::Occupied(value, epoch))
|
||||
}
|
||||
|
||||
pub(crate) fn insert_error(&mut self, id: I, label: &str) {
|
||||
let (index, epoch, _) = id.unzip();
|
||||
self.insert_impl(index as usize, Element::Error(epoch, label.to_string()))
|
||||
}
|
||||
|
||||
pub(crate) fn force_replace(&mut self, id: I, value: T) {
|
||||
let (index, epoch, _) = id.unzip();
|
||||
self.map[index as usize] = Element::Occupied(value, epoch);
|
||||
}
|
||||
|
||||
pub(crate) fn remove(&mut self, id: I) -> Option<T> {
|
||||
let (index, epoch, _) = id.unzip();
|
||||
match std::mem::replace(&mut self.map[index as usize], Element::Vacant) {
|
||||
Element::Occupied(value, storage_epoch) => {
|
||||
assert_eq!(epoch, storage_epoch);
|
||||
Some(value)
|
||||
}
|
||||
Element::Error(..) => None,
|
||||
Element::Vacant => panic!("Cannot remove a vacant resource"),
|
||||
}
|
||||
}
|
||||
|
||||
// Prevents panic on out of range access, allows Vacant elements.
|
||||
pub(crate) fn _try_remove(&mut self, id: I) -> Option<T> {
|
||||
let (index, epoch, _) = id.unzip();
|
||||
if index as usize >= self.map.len() {
|
||||
None
|
||||
} else if let Element::Occupied(value, storage_epoch) =
|
||||
std::mem::replace(&mut self.map[index as usize], Element::Vacant)
|
||||
{
|
||||
assert_eq!(epoch, storage_epoch);
|
||||
Some(value)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn iter(&self, backend: Backend) -> impl Iterator<Item = (I, &T)> {
|
||||
self.map
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(move |(index, x)| match *x {
|
||||
Element::Occupied(ref value, storage_epoch) => {
|
||||
Some((I::zip(index as Index, storage_epoch, backend), value))
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.map.len()
|
||||
}
|
||||
|
||||
pub(crate) fn generate_report(&self) -> StorageReport {
|
||||
let mut report = StorageReport {
|
||||
element_size: mem::size_of::<T>(),
|
||||
..Default::default()
|
||||
};
|
||||
for element in self.map.iter() {
|
||||
match *element {
|
||||
Element::Occupied(..) => report.num_occupied += 1,
|
||||
Element::Vacant => report.num_vacant += 1,
|
||||
Element::Error(..) => report.num_error += 1,
|
||||
}
|
||||
}
|
||||
report
|
||||
}
|
||||
}
|
||||
792
third-party/vendor/wgpu-core/src/track/buffer.rs
vendored
Normal file
792
third-party/vendor/wgpu-core/src/track/buffer.rs
vendored
Normal file
|
|
@ -0,0 +1,792 @@
|
|||
/*! Buffer Trackers
|
||||
*
|
||||
* Buffers are represented by a single state for the whole resource,
|
||||
* a 16 bit bitflag of buffer usages. Because there is only ever
|
||||
* one subresource, they have no selector.
|
||||
!*/
|
||||
|
||||
use std::{borrow::Cow, marker::PhantomData, vec::Drain};
|
||||
|
||||
use super::PendingTransition;
|
||||
use crate::{
|
||||
hal_api::HalApi,
|
||||
id::{BufferId, TypedId, Valid},
|
||||
resource::Buffer,
|
||||
storage,
|
||||
track::{
|
||||
invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider,
|
||||
ResourceUses, UsageConflict,
|
||||
},
|
||||
LifeGuard, RefCount,
|
||||
};
|
||||
use hal::BufferUses;
|
||||
use wgt::{strict_assert, strict_assert_eq};
|
||||
|
||||
impl ResourceUses for BufferUses {
    // Forwarded from the `BufferUses` bitflags' own `EXCLUSIVE` set.
    const EXCLUSIVE: Self = Self::EXCLUSIVE;

    type Id = BufferId;
    // Buffers have a single subresource, so no selector is needed.
    type Selector = ();

    /// Raw bitflag representation of this usage set.
    fn bits(self) -> u16 {
        Self::bits(&self)
    }

    /// Returns true if every set usage is in the `ORDERED` set.
    fn all_ordered(self) -> bool {
        Self::ORDERED.contains(self)
    }

    /// Returns true if any set usage is in the `EXCLUSIVE` set.
    fn any_exclusive(self) -> bool {
        self.intersects(Self::EXCLUSIVE)
    }
}
|
||||
|
||||
/// Stores all the buffers that a bind group stores.
pub(crate) struct BufferBindGroupState<A: HalApi> {
    /// One entry per bound buffer use: the validated id, a clone of the
    /// buffer's ref count (keeping it alive), and the usage it is bound with.
    buffers: Vec<(Valid<BufferId>, RefCount, BufferUses)>,

    /// Ties this state to the HAL backend `A` without storing one.
    _phantom: PhantomData<A>,
}
|
||||
impl<A: HalApi> BufferBindGroupState<A> {
    /// Creates an empty bind group state.
    pub fn new() -> Self {
        Self {
            buffers: Vec::new(),

            _phantom: PhantomData,
        }
    }

    /// Optimize the buffer bind group state by sorting it by ID.
    ///
    /// When this list of states is merged into a tracker, the memory
    /// accesses will be in a constant ascending order.
    pub(crate) fn optimize(&mut self) {
        self.buffers
            .sort_unstable_by_key(|&(id, _, _)| id.0.unzip().0);
    }

    /// Returns a list of all buffers tracked. May contain duplicates.
    pub fn used(&self) -> impl Iterator<Item = Valid<BufferId>> + '_ {
        self.buffers.iter().map(|&(id, _, _)| id)
    }

    /// Adds the given resource with the given state.
    ///
    /// Returns `None` (recording nothing) if `id` does not resolve to a
    /// live buffer in `storage`.
    pub fn add_single<'a>(
        &mut self,
        storage: &'a storage::Storage<Buffer<A>, BufferId>,
        id: BufferId,
        state: BufferUses,
    ) -> Option<&'a Buffer<A>> {
        let buffer = storage.get(id).ok()?;

        // Clone the ref count so the buffer stays alive as long as this
        // bind group state holds the entry.
        self.buffers
            .push((Valid(id), buffer.life_guard.add_ref(), state));

        Some(buffer)
    }
}
|
||||
|
||||
/// Stores all buffer state within a single usage scope.
#[derive(Debug)]
pub(crate) struct BufferUsageScope<A: HalApi> {
    /// Per-index merged usage state; kept parallel to `metadata`.
    state: Vec<BufferUses>,

    /// Ownership info (epoch + ref count) for each tracked index.
    metadata: ResourceMetadata<A>,
}
|
||||
|
||||
impl<A: HalApi> BufferUsageScope<A> {
    /// Creates an empty usage scope.
    pub fn new() -> Self {
        Self {
            state: Vec::new(),

            metadata: ResourceMetadata::new(),
        }
    }

    /// Asserts (in strict builds only) that `index` is in bounds for all
    /// internal vectors, upholding the preconditions of the unsafe helpers.
    fn tracker_assert_in_bounds(&self, index: usize) {
        strict_assert!(index < self.state.len());
        self.metadata.tracker_assert_in_bounds(index);
    }

    /// Sets the size of all the vectors inside the tracker.
    ///
    /// Must be called with the highest possible Buffer ID before
    /// all unsafe functions are called.
    pub fn set_size(&mut self, size: usize) {
        self.state.resize(size, BufferUses::empty());
        self.metadata.set_size(size);
    }

    /// Extend the vectors to let the given index be valid.
    fn allow_index(&mut self, index: usize) {
        if index >= self.state.len() {
            self.set_size(index + 1);
        }
    }

    /// Returns a list of all buffers tracked.
    pub fn used(&self) -> impl Iterator<Item = Valid<BufferId>> + '_ {
        self.metadata.owned_ids()
    }

    /// Merge the list of buffer states in the given bind group into this usage scope.
    ///
    /// If any of the resulting states is invalid, stops the merge and returns a usage
    /// conflict with the details of the invalid state.
    ///
    /// Because bind groups do not check if the union of all their states is valid,
    /// this method is allowed to return Err on the first bind group bound.
    ///
    /// # Safety
    ///
    /// [`Self::set_size`] must be called with the maximum possible Buffer ID before this
    /// method is called.
    pub unsafe fn merge_bind_group(
        &mut self,
        bind_group: &BufferBindGroupState<A>,
    ) -> Result<(), UsageConflict> {
        for &(id, ref ref_count, state) in &bind_group.buffers {
            let (index32, epoch, _) = id.0.unzip();
            let index = index32 as usize;

            unsafe {
                insert_or_merge(
                    None,
                    None,
                    &mut self.state,
                    &mut self.metadata,
                    index32,
                    index,
                    BufferStateProvider::Direct { state },
                    // Borrow the bind group's ref count; it is only cloned
                    // if the scope actually takes ownership of the entry.
                    ResourceMetadataProvider::Direct {
                        epoch,
                        ref_count: Cow::Borrowed(ref_count),
                    },
                )?
            };
        }

        Ok(())
    }

    /// Merge the list of buffer states in the given usage scope into this UsageScope.
    ///
    /// If any of the resulting states is invalid, stops the merge and returns a usage
    /// conflict with the details of the invalid state.
    ///
    /// If the given tracker uses IDs higher than the length of internal vectors,
    /// the vectors will be extended. A call to set_size is not needed.
    pub fn merge_usage_scope(&mut self, scope: &Self) -> Result<(), UsageConflict> {
        let incoming_size = scope.state.len();
        if incoming_size > self.state.len() {
            self.set_size(incoming_size);
        }

        for index in scope.metadata.owned_indices() {
            self.tracker_assert_in_bounds(index);
            scope.tracker_assert_in_bounds(index);

            unsafe {
                insert_or_merge(
                    None,
                    None,
                    &mut self.state,
                    &mut self.metadata,
                    index as u32,
                    index,
                    BufferStateProvider::Indirect {
                        state: &scope.state,
                    },
                    ResourceMetadataProvider::Indirect {
                        metadata: &scope.metadata,
                    },
                )?;
            };
        }

        Ok(())
    }

    /// Merge a single state into the UsageScope.
    ///
    /// If the resulting state is invalid, returns a usage
    /// conflict with the details of the invalid state.
    ///
    /// If the ID is higher than the length of internal vectors,
    /// the vectors will be extended. A call to set_size is not needed.
    pub fn merge_single<'a>(
        &mut self,
        storage: &'a storage::Storage<Buffer<A>, BufferId>,
        id: BufferId,
        new_state: BufferUses,
    ) -> Result<&'a Buffer<A>, UsageConflict> {
        let buffer = storage
            .get(id)
            .map_err(|_| UsageConflict::BufferInvalid { id })?;

        let (index32, epoch, _) = id.unzip();
        let index = index32 as usize;

        self.allow_index(index);

        self.tracker_assert_in_bounds(index);

        unsafe {
            insert_or_merge(
                Some(&buffer.life_guard),
                None,
                &mut self.state,
                &mut self.metadata,
                index32,
                index,
                BufferStateProvider::Direct { state: new_state },
                ResourceMetadataProvider::Resource { epoch },
            )?;
        }

        Ok(buffer)
    }
}
|
||||
|
||||
/// Stores all buffer state within a command buffer or device.
pub(crate) struct BufferTracker<A: HalApi> {
    /// Per-index state vectors, parallel to `metadata`. `start` is written
    /// on insert; `end` is what barrier/update operations advance.
    start: Vec<BufferUses>,
    end: Vec<BufferUses>,

    /// Ownership info (epoch + ref count) for each tracked index.
    metadata: ResourceMetadata<A>,

    /// Scratch list of pending transitions, drained via `drain`.
    temp: Vec<PendingTransition<BufferUses>>,
}
|
||||
impl<A: HalApi> BufferTracker<A> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
start: Vec::new(),
|
||||
end: Vec::new(),
|
||||
|
||||
metadata: ResourceMetadata::new(),
|
||||
|
||||
temp: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn tracker_assert_in_bounds(&self, index: usize) {
|
||||
strict_assert!(index < self.start.len());
|
||||
strict_assert!(index < self.end.len());
|
||||
self.metadata.tracker_assert_in_bounds(index);
|
||||
}
|
||||
|
||||
/// Sets the size of all the vectors inside the tracker.
|
||||
///
|
||||
/// Must be called with the highest possible Buffer ID before
|
||||
/// all unsafe functions are called.
|
||||
pub fn set_size(&mut self, size: usize) {
|
||||
self.start.resize(size, BufferUses::empty());
|
||||
self.end.resize(size, BufferUses::empty());
|
||||
|
||||
self.metadata.set_size(size);
|
||||
}
|
||||
|
||||
/// Extend the vectors to let the given index be valid.
|
||||
fn allow_index(&mut self, index: usize) {
|
||||
if index >= self.start.len() {
|
||||
self.set_size(index + 1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a list of all buffers tracked.
|
||||
pub fn used(&self) -> impl Iterator<Item = Valid<BufferId>> + '_ {
|
||||
self.metadata.owned_ids()
|
||||
}
|
||||
|
||||
/// Drains all currently pending transitions.
|
||||
pub fn drain(&mut self) -> Drain<'_, PendingTransition<BufferUses>> {
|
||||
self.temp.drain(..)
|
||||
}
|
||||
|
||||
/// Inserts a single buffer and its state into the resource tracker.
|
||||
///
|
||||
/// If the resource already exists in the tracker, this will panic.
|
||||
///
|
||||
/// If the ID is higher than the length of internal vectors,
|
||||
/// the vectors will be extended. A call to set_size is not needed.
|
||||
pub fn insert_single(&mut self, id: Valid<BufferId>, ref_count: RefCount, state: BufferUses) {
|
||||
let (index32, epoch, _) = id.0.unzip();
|
||||
let index = index32 as usize;
|
||||
|
||||
self.allow_index(index);
|
||||
|
||||
self.tracker_assert_in_bounds(index);
|
||||
|
||||
unsafe {
|
||||
let currently_owned = self.metadata.contains_unchecked(index);
|
||||
|
||||
if currently_owned {
|
||||
panic!("Tried to insert buffer already tracked");
|
||||
}
|
||||
|
||||
insert(
|
||||
None,
|
||||
Some(&mut self.start),
|
||||
&mut self.end,
|
||||
&mut self.metadata,
|
||||
index,
|
||||
BufferStateProvider::Direct { state },
|
||||
None,
|
||||
ResourceMetadataProvider::Direct {
|
||||
epoch,
|
||||
ref_count: Cow::Owned(ref_count),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the state of a single buffer.
|
||||
///
|
||||
/// If a transition is needed to get the buffer into the given state, that transition
|
||||
/// is returned. No more than one transition is needed.
|
||||
///
|
||||
/// If the ID is higher than the length of internal vectors,
|
||||
/// the vectors will be extended. A call to set_size is not needed.
|
||||
pub fn set_single<'a>(
|
||||
&mut self,
|
||||
storage: &'a storage::Storage<Buffer<A>, BufferId>,
|
||||
id: BufferId,
|
||||
state: BufferUses,
|
||||
) -> Option<(&'a Buffer<A>, Option<PendingTransition<BufferUses>>)> {
|
||||
let value = storage.get(id).ok()?;
|
||||
|
||||
let (index32, epoch, _) = id.unzip();
|
||||
let index = index32 as usize;
|
||||
|
||||
self.allow_index(index);
|
||||
|
||||
self.tracker_assert_in_bounds(index);
|
||||
|
||||
unsafe {
|
||||
insert_or_barrier_update(
|
||||
Some(&value.life_guard),
|
||||
Some(&mut self.start),
|
||||
&mut self.end,
|
||||
&mut self.metadata,
|
||||
index32,
|
||||
index,
|
||||
BufferStateProvider::Direct { state },
|
||||
None,
|
||||
ResourceMetadataProvider::Resource { epoch },
|
||||
&mut self.temp,
|
||||
)
|
||||
};
|
||||
|
||||
strict_assert!(self.temp.len() <= 1);
|
||||
|
||||
Some((value, self.temp.pop()))
|
||||
}
|
||||
|
||||
/// Sets the given state for all buffers in the given tracker.
|
||||
///
|
||||
/// If a transition is needed to get the buffers into the needed state,
|
||||
/// those transitions are stored within the tracker. A subsequent
|
||||
/// call to [`Self::drain`] is needed to get those transitions.
|
||||
///
|
||||
/// If the ID is higher than the length of internal vectors,
|
||||
/// the vectors will be extended. A call to set_size is not needed.
|
||||
pub fn set_from_tracker(&mut self, tracker: &Self) {
|
||||
let incoming_size = tracker.start.len();
|
||||
if incoming_size > self.start.len() {
|
||||
self.set_size(incoming_size);
|
||||
}
|
||||
|
||||
for index in tracker.metadata.owned_indices() {
|
||||
self.tracker_assert_in_bounds(index);
|
||||
tracker.tracker_assert_in_bounds(index);
|
||||
unsafe {
|
||||
insert_or_barrier_update(
|
||||
None,
|
||||
Some(&mut self.start),
|
||||
&mut self.end,
|
||||
&mut self.metadata,
|
||||
index as u32,
|
||||
index,
|
||||
BufferStateProvider::Indirect {
|
||||
state: &tracker.start,
|
||||
},
|
||||
Some(BufferStateProvider::Indirect {
|
||||
state: &tracker.end,
|
||||
}),
|
||||
ResourceMetadataProvider::Indirect {
|
||||
metadata: &tracker.metadata,
|
||||
},
|
||||
&mut self.temp,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the given state for all buffers in the given UsageScope.
|
||||
///
|
||||
/// If a transition is needed to get the buffers into the needed state,
|
||||
/// those transitions are stored within the tracker. A subsequent
|
||||
/// call to [`Self::drain`] is needed to get those transitions.
|
||||
///
|
||||
/// If the ID is higher than the length of internal vectors,
|
||||
/// the vectors will be extended. A call to set_size is not needed.
|
||||
pub fn set_from_usage_scope(&mut self, scope: &BufferUsageScope<A>) {
|
||||
let incoming_size = scope.state.len();
|
||||
if incoming_size > self.start.len() {
|
||||
self.set_size(incoming_size);
|
||||
}
|
||||
|
||||
for index in scope.metadata.owned_indices() {
|
||||
self.tracker_assert_in_bounds(index);
|
||||
scope.tracker_assert_in_bounds(index);
|
||||
unsafe {
|
||||
insert_or_barrier_update(
|
||||
None,
|
||||
Some(&mut self.start),
|
||||
&mut self.end,
|
||||
&mut self.metadata,
|
||||
index as u32,
|
||||
index,
|
||||
BufferStateProvider::Indirect {
|
||||
state: &scope.state,
|
||||
},
|
||||
None,
|
||||
ResourceMetadataProvider::Indirect {
|
||||
metadata: &scope.metadata,
|
||||
},
|
||||
&mut self.temp,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterates through all buffers in the given bind group and adopts
|
||||
/// the state given for those buffers in the UsageScope. It also
|
||||
/// removes all touched buffers from the usage scope.
|
||||
///
|
||||
/// If a transition is needed to get the buffers into the needed state,
|
||||
/// those transitions are stored within the tracker. A subsequent
|
||||
/// call to [`Self::drain`] is needed to get those transitions.
|
||||
///
|
||||
/// This is a really funky method used by Compute Passes to generate
|
||||
/// barriers after a call to dispatch without needing to iterate
|
||||
/// over all elements in the usage scope. We use each the
|
||||
/// a given iterator of ids as a source of which IDs to look at.
|
||||
/// All the IDs must have first been added to the usage scope.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// [`Self::set_size`] must be called with the maximum possible Buffer ID before this
|
||||
/// method is called.
|
||||
pub unsafe fn set_and_remove_from_usage_scope_sparse(
|
||||
&mut self,
|
||||
scope: &mut BufferUsageScope<A>,
|
||||
id_source: impl IntoIterator<Item = Valid<BufferId>>,
|
||||
) {
|
||||
let incoming_size = scope.state.len();
|
||||
if incoming_size > self.start.len() {
|
||||
self.set_size(incoming_size);
|
||||
}
|
||||
|
||||
for id in id_source {
|
||||
let (index32, _, _) = id.0.unzip();
|
||||
let index = index32 as usize;
|
||||
|
||||
scope.tracker_assert_in_bounds(index);
|
||||
|
||||
if unsafe { !scope.metadata.contains_unchecked(index) } {
|
||||
continue;
|
||||
}
|
||||
unsafe {
|
||||
insert_or_barrier_update(
|
||||
None,
|
||||
Some(&mut self.start),
|
||||
&mut self.end,
|
||||
&mut self.metadata,
|
||||
index as u32,
|
||||
index,
|
||||
BufferStateProvider::Indirect {
|
||||
state: &scope.state,
|
||||
},
|
||||
None,
|
||||
ResourceMetadataProvider::Indirect {
|
||||
metadata: &scope.metadata,
|
||||
},
|
||||
&mut self.temp,
|
||||
)
|
||||
};
|
||||
|
||||
unsafe { scope.metadata.remove(index) };
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes the given resource from the tracker iff we have the last reference to the
|
||||
/// resource and the epoch matches.
|
||||
///
|
||||
/// Returns true if the resource was removed.
|
||||
///
|
||||
/// If the ID is higher than the length of internal vectors,
|
||||
/// false will be returned.
|
||||
pub fn remove_abandoned(&mut self, id: Valid<BufferId>) -> bool {
|
||||
let (index32, epoch, _) = id.0.unzip();
|
||||
let index = index32 as usize;
|
||||
|
||||
if index > self.metadata.size() {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.tracker_assert_in_bounds(index);
|
||||
|
||||
unsafe {
|
||||
if self.metadata.contains_unchecked(index) {
|
||||
let existing_epoch = self.metadata.get_epoch_unchecked(index);
|
||||
let existing_ref_count = self.metadata.get_ref_count_unchecked(index);
|
||||
|
||||
if existing_epoch == epoch && existing_ref_count.load() == 1 {
|
||||
self.metadata.remove(index);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Source of Buffer State.
#[derive(Debug, Clone)]
enum BufferStateProvider<'a> {
    /// Get a state that was provided directly.
    Direct { state: BufferUses },
    /// Get a state from an array of states, indexed by resource index.
    Indirect { state: &'a [BufferUses] },
}
|
||||
impl BufferStateProvider<'_> {
    /// Gets the state from the provider, given a resource ID index.
    ///
    /// # Safety
    ///
    /// Index must be in bounds for the indirect source iff this is in the indirect state.
    #[inline(always)]
    unsafe fn get_state(&self, index: usize) -> BufferUses {
        match *self {
            BufferStateProvider::Direct { state } => state,
            BufferStateProvider::Indirect { state } => {
                strict_assert!(index < state.len());
                // SAFETY: caller guarantees `index < state.len()`; the
                // strict_assert above re-checks this in strict builds.
                *unsafe { state.get_unchecked(index) }
            }
        }
    }
}
|
||||
|
||||
/// Does an insertion operation if the index isn't tracked
/// in the current metadata, otherwise merges the given state
/// with the current state. If the merging would cause
/// a conflict, returns that usage conflict.
///
/// # Safety
///
/// Indexes must be valid indexes into all arrays passed in
/// to this function, either directly or via metadata or provider structs.
#[inline(always)]
unsafe fn insert_or_merge<A: HalApi>(
    life_guard: Option<&LifeGuard>,
    start_states: Option<&mut [BufferUses]>,
    current_states: &mut [BufferUses],
    resource_metadata: &mut ResourceMetadata<A>,
    index32: u32,
    index: usize,
    state_provider: BufferStateProvider<'_>,
    metadata_provider: ResourceMetadataProvider<'_, A>,
) -> Result<(), UsageConflict> {
    // SAFETY: caller guarantees `index` is in bounds for the metadata.
    let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };

    if !currently_owned {
        // First sighting of this resource: record it with no merging and
        // no possible conflict.
        unsafe {
            insert(
                life_guard,
                start_states,
                current_states,
                resource_metadata,
                index,
                state_provider,
                None,
                metadata_provider,
            )
        };
        return Ok(());
    }

    // Already tracked: combine the new usage with the existing one,
    // failing if the union is an invalid combination.
    unsafe {
        merge(
            current_states,
            index32,
            index,
            state_provider,
            metadata_provider,
        )
    }
}
|
||||
|
||||
/// If the resource isn't tracked
/// - Inserts the given resource.
/// - Uses the `start_state_provider` to populate `start_states`
/// - Uses either `end_state_provider` or `start_state_provider`
///   to populate `current_states`.
/// If the resource is tracked
/// - Inserts barriers from the state in `current_states`
///   to the state provided by `start_state_provider`.
/// - Updates the `current_states` with either the state from
///   `end_state_provider` or `start_state_provider`.
///
/// Any barriers are added to the barrier vector.
///
/// # Safety
///
/// Indexes must be valid indexes into all arrays passed in
/// to this function, either directly or via metadata or provider structs.
#[inline(always)]
unsafe fn insert_or_barrier_update<A: HalApi>(
    life_guard: Option<&LifeGuard>,
    start_states: Option<&mut [BufferUses]>,
    current_states: &mut [BufferUses],
    resource_metadata: &mut ResourceMetadata<A>,
    index32: u32,
    index: usize,
    start_state_provider: BufferStateProvider<'_>,
    end_state_provider: Option<BufferStateProvider<'_>>,
    metadata_provider: ResourceMetadataProvider<'_, A>,
    barriers: &mut Vec<PendingTransition<BufferUses>>,
) {
    // SAFETY: caller guarantees `index` is in bounds for the metadata.
    let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };

    if !currently_owned {
        // First sighting: just record the state, no barrier needed.
        unsafe {
            insert(
                life_guard,
                start_states,
                current_states,
                resource_metadata,
                index,
                start_state_provider,
                end_state_provider,
                metadata_provider,
            )
        };
        return;
    }

    // When no explicit end state is given, the start state doubles as the
    // state to transition into.
    let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone());
    unsafe {
        barrier(
            current_states,
            index32,
            index,
            start_state_provider,
            barriers,
        )
    };

    unsafe { update(current_states, index, update_state_provider) };
}
|
||||
|
||||
/// Records a previously-untracked resource: writes its start/end states
/// and installs its metadata (epoch + ref count).
///
/// # Safety
///
/// `index` must be in bounds for all slices and for the metadata; the
/// resource must not already be tracked.
#[inline(always)]
unsafe fn insert<A: HalApi>(
    life_guard: Option<&LifeGuard>,
    start_states: Option<&mut [BufferUses]>,
    current_states: &mut [BufferUses],
    resource_metadata: &mut ResourceMetadata<A>,
    index: usize,
    start_state_provider: BufferStateProvider<'_>,
    end_state_provider: Option<BufferStateProvider<'_>>,
    metadata_provider: ResourceMetadataProvider<'_, A>,
) {
    let new_start_state = unsafe { start_state_provider.get_state(index) };
    // With no end provider, the start state is also the current state.
    let new_end_state =
        end_state_provider.map_or(new_start_state, |p| unsafe { p.get_state(index) });

    // This should only ever happen with a wgpu bug, but let's just double
    // check that resource states don't have any conflicts.
    strict_assert_eq!(invalid_resource_state(new_start_state), false);
    strict_assert_eq!(invalid_resource_state(new_end_state), false);

    log::trace!("\tbuf {index}: insert {new_start_state:?}..{new_end_state:?}");

    unsafe {
        // SAFETY: caller guarantees `index` is in bounds for both slices.
        if let Some(&mut ref mut start_state) = start_states {
            *start_state.get_unchecked_mut(index) = new_start_state;
        }
        *current_states.get_unchecked_mut(index) = new_end_state;

        let (epoch, ref_count) = metadata_provider.get_own(life_guard, index);
        resource_metadata.insert(index, epoch, ref_count);
    }
}
|
||||
|
||||
/// Unions the incoming state with the tracked state in place, returning
/// a [`UsageConflict`] if the combination is invalid.
///
/// # Safety
///
/// `index` must be in bounds for `current_states`, the state provider,
/// and the metadata provider.
#[inline(always)]
unsafe fn merge<A: HalApi>(
    current_states: &mut [BufferUses],
    index32: u32,
    index: usize,
    state_provider: BufferStateProvider<'_>,
    metadata_provider: ResourceMetadataProvider<'_, A>,
) -> Result<(), UsageConflict> {
    // SAFETY: caller guarantees `index` is in bounds.
    let current_state = unsafe { current_states.get_unchecked_mut(index) };
    let new_state = unsafe { state_provider.get_state(index) };

    let merged_state = *current_state | new_state;

    if invalid_resource_state(merged_state) {
        // Reconstruct the full id from its parts for the error report.
        return Err(UsageConflict::from_buffer(
            BufferId::zip(
                index32,
                unsafe { metadata_provider.get_epoch(index) },
                A::VARIANT,
            ),
            *current_state,
            new_state,
        ));
    }

    log::trace!("\tbuf {index32}: merge {current_state:?} + {new_state:?}");

    *current_state = merged_state;

    Ok(())
}
|
||||
|
||||
/// Pushes a pending transition from the tracked state to the provided
/// state, unless `skip_barrier` says none is needed.
///
/// # Safety
///
/// `index` must be in bounds for `current_states` and the state provider.
#[inline(always)]
unsafe fn barrier(
    current_states: &mut [BufferUses],
    index32: u32,
    index: usize,
    state_provider: BufferStateProvider<'_>,
    barriers: &mut Vec<PendingTransition<BufferUses>>,
) {
    // SAFETY: caller guarantees `index` is in bounds.
    let current_state = unsafe { *current_states.get_unchecked(index) };
    let new_state = unsafe { state_provider.get_state(index) };

    if skip_barrier(current_state, new_state) {
        return;
    }

    barriers.push(PendingTransition {
        id: index32,
        selector: (),
        usage: current_state..new_state,
    });

    log::trace!("\tbuf {index32}: transition {current_state:?} -> {new_state:?}");
}
|
||||
|
||||
/// Overwrites the tracked state at `index` with the provided state.
///
/// # Safety
///
/// `index` must be in bounds for `current_states` and the state provider.
#[inline(always)]
unsafe fn update(
    current_states: &mut [BufferUses],
    index: usize,
    state_provider: BufferStateProvider<'_>,
) {
    // SAFETY: caller guarantees `index` is in bounds.
    let current_state = unsafe { current_states.get_unchecked_mut(index) };
    let new_state = unsafe { state_provider.get_state(index) };

    *current_state = new_state;
}
|
||||
269
third-party/vendor/wgpu-core/src/track/metadata.rs
vendored
Normal file
269
third-party/vendor/wgpu-core/src/track/metadata.rs
vendored
Normal file
|
|
@ -0,0 +1,269 @@
|
|||
//! The `ResourceMetadata` type.
|
||||
|
||||
use crate::{
|
||||
hal_api::HalApi,
|
||||
id::{self, TypedId},
|
||||
Epoch, LifeGuard, RefCount,
|
||||
};
|
||||
use bit_vec::BitVec;
|
||||
use std::{borrow::Cow, marker::PhantomData, mem};
|
||||
use wgt::strict_assert;
|
||||
|
||||
/// A set of resources, holding a [`RefCount`] and epoch for each member.
///
/// Testing for membership is fast, and iterating over members is
/// reasonably fast in practice. Storage consumption is proportional
/// to the largest id index of any member, not to the number of
/// members, but a bit vector tracks occupancy, so iteration touches
/// only occupied elements.
#[derive(Debug)]
pub(super) struct ResourceMetadata<A: HalApi> {
    /// If the resource with index `i` is a member, `owned[i]` is `true`.
    owned: BitVec<usize>,

    /// A vector parallel to `owned`, holding clones of members' `RefCount`s.
    /// `None` for non-member slots.
    ref_counts: Vec<Option<RefCount>>,

    /// A vector parallel to `owned`, holding the epoch of each members' id.
    epochs: Vec<Epoch>,

    /// This tells Rust that this type should be covariant with `A`.
    _phantom: PhantomData<A>,
}
|
||||
|
||||
impl<A: HalApi> ResourceMetadata<A> {
|
||||
pub(super) fn new() -> Self {
|
||||
Self {
|
||||
owned: BitVec::default(),
|
||||
ref_counts: Vec::new(),
|
||||
epochs: Vec::new(),
|
||||
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of indices we can accommodate.
|
||||
pub(super) fn size(&self) -> usize {
|
||||
self.owned.len()
|
||||
}
|
||||
|
||||
pub(super) fn set_size(&mut self, size: usize) {
|
||||
self.ref_counts.resize(size, None);
|
||||
self.epochs.resize(size, u32::MAX);
|
||||
|
||||
resize_bitvec(&mut self.owned, size);
|
||||
}
|
||||
|
||||
/// Ensures a given index is in bounds for all arrays and does
|
||||
/// sanity checks of the presence of a refcount.
|
||||
///
|
||||
/// In release mode this function is completely empty and is removed.
|
||||
#[cfg_attr(not(feature = "strict_asserts"), allow(unused_variables))]
|
||||
pub(super) fn tracker_assert_in_bounds(&self, index: usize) {
|
||||
strict_assert!(index < self.owned.len());
|
||||
strict_assert!(index < self.ref_counts.len());
|
||||
strict_assert!(index < self.epochs.len());
|
||||
|
||||
strict_assert!(if self.contains(index) {
|
||||
self.ref_counts[index].is_some()
|
||||
} else {
|
||||
true
|
||||
});
|
||||
}
|
||||
|
||||
/// Returns true if the tracker owns no resources.
|
||||
///
|
||||
/// This is a O(n) operation.
|
||||
pub(super) fn is_empty(&self) -> bool {
|
||||
!self.owned.any()
|
||||
}
|
||||
|
||||
/// Returns true if the set contains the resource with the given index.
|
||||
pub(super) fn contains(&self, index: usize) -> bool {
|
||||
self.owned[index]
|
||||
}
|
||||
|
||||
/// Returns true if the set contains the resource with the given index.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The given `index` must be in bounds for this `ResourceMetadata`'s
|
||||
/// existing tables. See `tracker_assert_in_bounds`.
|
||||
#[inline(always)]
|
||||
pub(super) unsafe fn contains_unchecked(&self, index: usize) -> bool {
|
||||
unsafe { self.owned.get(index).unwrap_unchecked() }
|
||||
}
|
||||
|
||||
/// Insert a resource into the set.
|
||||
///
|
||||
/// Add the resource with the given index, epoch, and reference count to the
|
||||
/// set.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The given `index` must be in bounds for this `ResourceMetadata`'s
|
||||
/// existing tables. See `tracker_assert_in_bounds`.
|
||||
#[inline(always)]
|
||||
pub(super) unsafe fn insert(&mut self, index: usize, epoch: Epoch, ref_count: RefCount) {
|
||||
self.owned.set(index, true);
|
||||
unsafe {
|
||||
*self.epochs.get_unchecked_mut(index) = epoch;
|
||||
*self.ref_counts.get_unchecked_mut(index) = Some(ref_count);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the [`RefCount`] of the resource with the given index.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The given `index` must be in bounds for this `ResourceMetadata`'s
|
||||
/// existing tables. See `tracker_assert_in_bounds`.
|
||||
#[inline(always)]
|
||||
pub(super) unsafe fn get_ref_count_unchecked(&self, index: usize) -> &RefCount {
|
||||
unsafe {
|
||||
self.ref_counts
|
||||
.get_unchecked(index)
|
||||
.as_ref()
|
||||
.unwrap_unchecked()
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the [`Epoch`] of the id of the resource with the given index.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// The given `index` must be in bounds for this `ResourceMetadata`'s
|
||||
/// existing tables. See `tracker_assert_in_bounds`.
|
||||
#[inline(always)]
|
||||
pub(super) unsafe fn get_epoch_unchecked(&self, index: usize) -> Epoch {
|
||||
unsafe { *self.epochs.get_unchecked(index) }
|
||||
}
|
||||
|
||||
/// Returns an iterator over the ids for all resources owned by `self`.
|
||||
pub(super) fn owned_ids<Id: TypedId>(&self) -> impl Iterator<Item = id::Valid<Id>> + '_ {
|
||||
if !self.owned.is_empty() {
|
||||
self.tracker_assert_in_bounds(self.owned.len() - 1)
|
||||
};
|
||||
iterate_bitvec_indices(&self.owned).map(move |index| {
|
||||
let epoch = unsafe { *self.epochs.get_unchecked(index) };
|
||||
id::Valid(Id::zip(index as u32, epoch, A::VARIANT))
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns an iterator over the indices of all resources owned by `self`.
|
||||
pub(super) fn owned_indices(&self) -> impl Iterator<Item = usize> + '_ {
|
||||
if !self.owned.is_empty() {
|
||||
self.tracker_assert_in_bounds(self.owned.len() - 1)
|
||||
};
|
||||
iterate_bitvec_indices(&self.owned)
|
||||
}
|
||||
|
||||
/// Remove the resource with the given index from the set.
|
||||
pub(super) unsafe fn remove(&mut self, index: usize) {
|
||||
unsafe {
|
||||
*self.ref_counts.get_unchecked_mut(index) = None;
|
||||
*self.epochs.get_unchecked_mut(index) = u32::MAX;
|
||||
}
|
||||
self.owned.set(index, false);
|
||||
}
|
||||
}
|
||||
|
||||
/// A source of resource metadata.
///
/// This is used to abstract over the various places
/// trackers can get new resource metadata from.
pub(super) enum ResourceMetadataProvider<'a, A: HalApi> {
    /// Comes directly from explicit values.
    Direct {
        epoch: Epoch,
        ref_count: Cow<'a, RefCount>,
    },
    /// Comes from another metadata tracker.
    Indirect { metadata: &'a ResourceMetadata<A> },
    /// The epoch is given directly, but the life count comes from the resource itself.
    Resource { epoch: Epoch },
}
impl<A: HalApi> ResourceMetadataProvider<'_, A> {
    /// Get the epoch and an owned refcount from this.
    ///
    /// # Safety
    ///
    /// - The index must be in bounds of the metadata tracker if this uses an indirect source.
    /// - life_guard must be Some if this uses a Resource source.
    #[inline(always)]
    pub(super) unsafe fn get_own(
        self,
        life_guard: Option<&LifeGuard>,
        index: usize,
    ) -> (Epoch, RefCount) {
        match self {
            ResourceMetadataProvider::Direct { epoch, ref_count } => {
                (epoch, ref_count.into_owned())
            }
            ResourceMetadataProvider::Indirect { metadata } => {
                // Strict-assert builds validate the index before the
                // unchecked reads below.
                metadata.tracker_assert_in_bounds(index);
                (unsafe { *metadata.epochs.get_unchecked(index) }, {
                    // SAFETY: the caller guarantees the index is in bounds,
                    // and members always carry `Some` refcount.
                    let ref_count = unsafe { metadata.ref_counts.get_unchecked(index) };
                    unsafe { ref_count.clone().unwrap_unchecked() }
                })
            }
            ResourceMetadataProvider::Resource { epoch } => {
                // SAFETY: the caller guarantees `life_guard` is Some for a
                // Resource source; the strict assert documents that contract.
                strict_assert!(life_guard.is_some());
                (epoch, unsafe { life_guard.unwrap_unchecked() }.add_ref())
            }
        }
    }
    /// Get the epoch from this.
    ///
    /// # Safety
    ///
    /// - The index must be in bounds of the metadata tracker if this uses an indirect source.
    #[inline(always)]
    pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch {
        match self {
            ResourceMetadataProvider::Direct { epoch, .. }
            | ResourceMetadataProvider::Resource { epoch, .. } => epoch,
            ResourceMetadataProvider::Indirect { metadata } => {
                metadata.tracker_assert_in_bounds(index);
                unsafe { *metadata.epochs.get_unchecked(index) }
            }
        }
    }
}
|
||||
|
||||
/// Resizes the given bitvec to the given size. I'm not sure why this is hard to do but it is.
|
||||
fn resize_bitvec<B: bit_vec::BitBlock>(vec: &mut BitVec<B>, size: usize) {
|
||||
let owned_size_to_grow = size.checked_sub(vec.len());
|
||||
if let Some(delta) = owned_size_to_grow {
|
||||
if delta != 0 {
|
||||
vec.grow(delta, false);
|
||||
}
|
||||
} else {
|
||||
vec.truncate(size);
|
||||
}
|
||||
}
|
||||
|
||||
/// Produces an iterator that yields the indexes of all bits that are set in the bitvec.
///
/// Will skip entire usize's worth of bits if they are all false.
fn iterate_bitvec_indices(ownership: &BitVec<usize>) -> impl Iterator<Item = usize> + '_ {
    const BITS_PER_BLOCK: usize = mem::size_of::<usize>() * 8;

    let size = ownership.len();

    ownership
        .blocks()
        .enumerate()
        // Cheap early-out: a block that is all zeros contributes no indices,
        // so it is skipped with a single integer comparison.
        .filter(|&(_, word)| word != 0)
        .flat_map(move |(word_index, mut word)| {
            let bit_start = word_index * BITS_PER_BLOCK;
            // The final block may be only partially in use, so clamp the
            // bit range to the bitvec's logical length.
            let bit_end = (bit_start + BITS_PER_BLOCK).min(size);

            // This filter closure is deliberately stateful: each call tests
            // the lowest remaining bit of `word` and then shifts it out, so
            // the i-th call answers for bit i of this block.
            (bit_start..bit_end).filter(move |_| {
                let active = word & 0b1 != 0;
                word >>= 1;

                active
            })
        })
}
|
||||
613
third-party/vendor/wgpu-core/src/track/mod.rs
vendored
Normal file
613
third-party/vendor/wgpu-core/src/track/mod.rs
vendored
Normal file
|
|
@ -0,0 +1,613 @@
|
|||
/*! Resource State and Lifetime Trackers
|
||||
|
||||
These structures are responsible for keeping track of resource state,
|
||||
generating barriers where needed, and making sure resources are kept
|
||||
alive until the trackers die.
|
||||
|
||||
## General Architecture
|
||||
|
||||
Tracking is some of the hottest code in the entire codebase, so the trackers
|
||||
are designed to be as cache efficient as possible. They store resource state
|
||||
in flat vectors, storing metadata SOA style, one vector per type of metadata.
|
||||
|
||||
A lot of the tracker code is deeply unsafe, using unchecked accesses all over
|
||||
to make performance as good as possible. However, for all unsafe accesses, there
|
||||
is a corresponding debug assert the checks if that access is valid. This helps
|
||||
get bugs caught fast, while still letting users not need to pay for the bounds
|
||||
checks.
|
||||
|
||||
In wgpu, resource IDs are allocated and re-used, so will always be as low
|
||||
as reasonably possible. This allows us to use the ID as an index into an array.
|
||||
|
||||
## Statefulness
|
||||
|
||||
There are two main types of trackers, stateful and stateless.
|
||||
|
||||
Stateful trackers are for buffers and textures. They both have
|
||||
resource state attached to them which needs to be used to generate
|
||||
automatic synchronization. Because of the different requirements of
|
||||
buffers and textures, they have two separate tracking structures.
|
||||
|
||||
Stateless trackers only store metadata and own the given resource.
|
||||
|
||||
## Use Case
|
||||
|
||||
Within each type of tracker, the trackers are further split into 3 different
|
||||
use cases, Bind Group, Usage Scope, and a full Tracker.
|
||||
|
||||
Bind Group trackers are just a list of different resources, their refcount,
|
||||
and how they are used. Textures are used via a selector and a usage type.
|
||||
Buffers by just a usage type. Stateless resources don't have a usage type.
|
||||
|
||||
Usage Scope trackers are only for stateful resources. These trackers represent
|
||||
a single [`UsageScope`] in the spec. When a use is added to a usage scope,
|
||||
it is merged with all other uses of that resource in that scope. If there
|
||||
is a usage conflict, merging will fail and an error will be reported.
|
||||
|
||||
Full trackers represent a before and after state of a resource. These
|
||||
are used for tracking on the device and on command buffers. The before
|
||||
state represents the state the resource is first used as in the command buffer,
|
||||
the after state is the state the command buffer leaves the resource in.
|
||||
These double ended buffers can then be used to generate the needed transitions
|
||||
between command buffers.
|
||||
|
||||
## Dense Datastructure with Sparse Data
|
||||
|
||||
This tracking system is based on having completely dense data, but trackers do
|
||||
not always contain every resource. Some resources (or even most resources) go
|
||||
unused in any given command buffer. So to help speed up the process of iterating
|
||||
through possibly thousands of resources, we use a bit vector to represent if
|
||||
a resource is in the buffer or not. This allows us extremely efficient memory
|
||||
utilization, as well as being able to bail out of whole blocks of 32-64 resources
|
||||
with a single usize comparison with zero. In practice this means that merging
|
||||
partially resident buffers is extremely quick.
|
||||
|
||||
The main advantage of this dense datastructure is that we can do merging
|
||||
of trackers in an extremely efficient fashion that results in us doing linear
|
||||
scans down a couple of buffers. CPUs and their caches absolutely eat this up.
|
||||
|
||||
## Stateful Resource Operations
|
||||
|
||||
All operations on stateful trackers boil down to one of four operations:
|
||||
- `insert(tracker, new_state)` adds a resource with a given state to the tracker
|
||||
for the first time.
|
||||
- `merge(tracker, new_state)` merges this new state with the previous state, checking
|
||||
for usage conflicts.
|
||||
- `barrier(tracker, new_state)` compares the given state to the existing state and
|
||||
generates the needed barriers.
|
||||
- `update(tracker, new_state)` takes the given new state and overrides the old state.
|
||||
|
||||
This allows us to compose the operations to form the various kinds of tracker merges
|
||||
that need to happen in the codebase. For each resource in the given merger, the following
|
||||
operation applies:
|
||||
|
||||
```text
|
||||
UsageScope <- Resource = insert(scope, usage) OR merge(scope, usage)
|
||||
UsageScope <- UsageScope = insert(scope, scope) OR merge(scope, scope)
|
||||
CommandBuffer <- UsageScope = insert(buffer.start, buffer.end, scope)
|
||||
OR barrier(buffer.end, scope) + update(buffer.end, scope)
|
||||
Device <- CommandBuffer = insert(device.start, device.end, buffer.start, buffer.end)
|
||||
OR barrier(device.end, buffer.start) + update(device.end, buffer.end)
|
||||
```
|
||||
|
||||
[`UsageScope`]: https://gpuweb.github.io/gpuweb/#programming-model-synchronization
|
||||
*/
|
||||
|
||||
mod buffer;
|
||||
mod metadata;
|
||||
mod range;
|
||||
mod stateless;
|
||||
mod texture;
|
||||
|
||||
use crate::{
|
||||
binding_model, command, conv,
|
||||
hal_api::HalApi,
|
||||
id::{self, TypedId},
|
||||
pipeline, resource, storage,
|
||||
};
|
||||
|
||||
use std::{fmt, ops};
|
||||
use thiserror::Error;
|
||||
|
||||
pub(crate) use buffer::{BufferBindGroupState, BufferTracker, BufferUsageScope};
|
||||
use metadata::{ResourceMetadata, ResourceMetadataProvider};
|
||||
pub(crate) use stateless::{StatelessBindGroupSate, StatelessTracker};
|
||||
pub(crate) use texture::{
|
||||
TextureBindGroupState, TextureSelector, TextureTracker, TextureUsageScope,
|
||||
};
|
||||
use wgt::strict_assert_ne;
|
||||
|
||||
/// A structure containing all the information about a particular resource
/// transition. User code should be able to generate a pipeline barrier
/// based on the contents.
#[derive(Debug, PartialEq)]
pub(crate) struct PendingTransition<S: ResourceUses> {
    /// Index part of the transitioning resource's id.
    pub id: u32,
    /// Which subresources are affected (`()` for buffers).
    pub selector: S::Selector,
    /// The usage transition: `start` is the old state, `end` the new one.
    pub usage: ops::Range<S>,
}

impl PendingTransition<hal::BufferUses> {
    /// Produce the hal barrier corresponding to the transition.
    ///
    /// # Panics
    ///
    /// Panics if the buffer's raw handle has already been destroyed.
    pub fn into_hal<'a, A: hal::Api>(
        self,
        buf: &'a resource::Buffer<A>,
    ) -> hal::BufferBarrier<'a, A> {
        let buffer = buf.raw.as_ref().expect("Buffer is destroyed");
        hal::BufferBarrier {
            buffer,
            usage: self.usage,
        }
    }
}

impl PendingTransition<hal::TextureUses> {
    /// Produce the hal barrier corresponding to the transition.
    ///
    /// # Panics
    ///
    /// Panics if the texture's raw handle has already been destroyed.
    pub fn into_hal<'a, A: hal::Api>(
        self,
        tex: &'a resource::Texture<A>,
    ) -> hal::TextureBarrier<'a, A> {
        let texture = tex.inner.as_raw().expect("Texture is destroyed");

        // These showing up in a barrier is always a bug
        strict_assert_ne!(self.usage.start, hal::TextureUses::UNKNOWN);
        strict_assert_ne!(self.usage.end, hal::TextureUses::UNKNOWN);

        // An empty mip or layer range would make the barrier a no-op,
        // which is also always a bug upstream of here.
        let mip_count = self.selector.mips.end - self.selector.mips.start;
        strict_assert_ne!(mip_count, 0);
        let layer_count = self.selector.layers.end - self.selector.layers.start;
        strict_assert_ne!(layer_count, 0);

        hal::TextureBarrier {
            texture,
            range: wgt::ImageSubresourceRange {
                aspect: wgt::TextureAspect::All,
                base_mip_level: self.selector.mips.start,
                mip_level_count: Some(mip_count),
                base_array_layer: self.selector.layers.start,
                array_layer_count: Some(layer_count),
            },
            usage: self.usage,
        }
    }
}
|
||||
|
||||
/// The uses that a resource or subresource can be in.
pub(crate) trait ResourceUses:
    fmt::Debug + ops::BitAnd<Output = Self> + ops::BitOr<Output = Self> + PartialEq + Sized + Copy
{
    /// All flags that are exclusive.
    const EXCLUSIVE: Self;

    /// The relevant resource ID type.
    type Id: Copy + fmt::Debug + TypedId;
    /// The selector used by this resource.
    type Selector: fmt::Debug;

    /// Turn the resource into a pile of bits.
    fn bits(self) -> u16;
    /// Returns true if all the uses are ordered.
    fn all_ordered(self) -> bool;
    /// Returns true if any of the uses are exclusive.
    fn any_exclusive(self) -> bool;
}
|
||||
|
||||
/// Returns true if the given states violates the usage scope rule
|
||||
/// of any(inclusive) XOR one(exclusive)
|
||||
fn invalid_resource_state<T: ResourceUses>(state: T) -> bool {
|
||||
// Is power of two also means "is one bit set". We check for this as if
|
||||
// we're in any exclusive state, we must only be in a single state.
|
||||
state.any_exclusive() && !conv::is_power_of_two_u16(state.bits())
|
||||
}
|
||||
|
||||
/// Returns true if the transition from one state to another does not require
/// a barrier.
fn skip_barrier<T: ResourceUses>(old_state: T, new_state: T) -> bool {
    // If the state didn't change and all the usages are ordered, the hardware
    // will guarantee the order of accesses, so we do not need to issue a barrier at all
    old_state == new_state && old_state.all_ordered()
}
|
||||
|
||||
/// Error raised when a resource is used with conflicting states within a
/// usage scope, or when an invalid resource is used at all.
#[derive(Clone, Debug, Error, Eq, PartialEq)]
pub enum UsageConflict {
    #[error("Attempted to use invalid buffer")]
    BufferInvalid { id: id::BufferId },
    #[error("Attempted to use invalid texture")]
    TextureInvalid { id: id::TextureId },
    #[error("Attempted to use buffer with {invalid_use}.")]
    Buffer {
        id: id::BufferId,
        invalid_use: InvalidUse<hal::BufferUses>,
    },
    #[error("Attempted to use a texture (mips {mip_levels:?} layers {array_layers:?}) with {invalid_use}.")]
    Texture {
        id: id::TextureId,
        mip_levels: ops::Range<u32>,
        array_layers: ops::Range<u32>,
        invalid_use: InvalidUse<hal::TextureUses>,
    },
}
|
||||
|
||||
impl UsageConflict {
|
||||
fn from_buffer(
|
||||
id: id::BufferId,
|
||||
current_state: hal::BufferUses,
|
||||
new_state: hal::BufferUses,
|
||||
) -> Self {
|
||||
Self::Buffer {
|
||||
id,
|
||||
invalid_use: InvalidUse {
|
||||
current_state,
|
||||
new_state,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn from_texture(
|
||||
id: id::TextureId,
|
||||
selector: TextureSelector,
|
||||
current_state: hal::TextureUses,
|
||||
new_state: hal::TextureUses,
|
||||
) -> Self {
|
||||
Self::Texture {
|
||||
id,
|
||||
mip_levels: selector.mips,
|
||||
array_layers: selector.layers,
|
||||
invalid_use: InvalidUse {
|
||||
current_state,
|
||||
new_state,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl crate::error::PrettyError for UsageConflict {
|
||||
fn fmt_pretty(&self, fmt: &mut crate::error::ErrorFormatter) {
|
||||
fmt.error(self);
|
||||
match *self {
|
||||
Self::BufferInvalid { id } => {
|
||||
fmt.buffer_label(&id);
|
||||
}
|
||||
Self::TextureInvalid { id } => {
|
||||
fmt.texture_label(&id);
|
||||
}
|
||||
Self::Buffer { id, .. } => {
|
||||
fmt.buffer_label(&id);
|
||||
}
|
||||
Self::Texture { id, .. } => {
|
||||
fmt.texture_label(&id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Pretty print helper that shows helpful descriptions of a conflicting usage.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InvalidUse<T> {
    /// The usage the resource already holds in this scope.
    current_state: T,
    /// The usage that was requested and conflicted.
    new_state: T,
}

impl<T: ResourceUses> fmt::Display for InvalidUse<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let current = self.current_state;
        let new = self.new_state;

        // Isolate the exclusive bits on each side so the message can name
        // which exclusive usage caused the conflict.
        let current_exclusive = current & T::EXCLUSIVE;
        let new_exclusive = new & T::EXCLUSIVE;

        let exclusive = current_exclusive | new_exclusive;

        // The text starts with "tried to use X resource with {self}"
        write!(
            f,
            "conflicting usages. Current usage {current:?} and new usage {new:?}. \
            {exclusive:?} is an exclusive usage and cannot be used with any other \
            usages within the usage scope (renderpass or compute dispatch)"
        )
    }
}
|
||||
|
||||
/// All the usages that a bind group contains. The uses are not deduplicated in any way
/// and may include conflicting uses. This is fully compliant with the WebGPU spec.
///
/// All bind group states are sorted by their ID so that when adding to a tracker,
/// they are added in the most efficient order possible (ascending order).
pub(crate) struct BindGroupStates<A: HalApi> {
    pub buffers: BufferBindGroupState<A>,
    pub textures: TextureBindGroupState<A>,
    pub views: StatelessBindGroupSate<resource::TextureView<A>, id::TextureViewId>,
    pub samplers: StatelessBindGroupSate<resource::Sampler<A>, id::SamplerId>,
}

impl<A: HalApi> BindGroupStates<A> {
    /// Create a set of empty per-resource-type bind group states.
    pub fn new() -> Self {
        Self {
            buffers: BufferBindGroupState::new(),
            textures: TextureBindGroupState::new(),
            views: StatelessBindGroupSate::new(),
            samplers: StatelessBindGroupSate::new(),
        }
    }

    /// Optimize the bind group states by sorting them by ID.
    ///
    /// When this list of states is merged into a tracker, the memory
    /// accesses will be in a constant ascending order.
    pub fn optimize(&mut self) {
        self.buffers.optimize();
        self.textures.optimize();
        self.views.optimize();
        self.samplers.optimize();
    }
}
|
||||
|
||||
/// This is a render bundle specific usage scope. It includes stateless resources
/// that are not normally included in a usage scope, but are used by render bundles
/// and need to be owned by the render bundles.
pub(crate) struct RenderBundleScope<A: HalApi> {
    pub buffers: BufferUsageScope<A>,
    pub textures: TextureUsageScope<A>,
    // Don't need to track views and samplers, they are never used directly, only by bind groups.
    pub bind_groups: StatelessTracker<A, binding_model::BindGroup<A>, id::BindGroupId>,
    pub render_pipelines: StatelessTracker<A, pipeline::RenderPipeline<A>, id::RenderPipelineId>,
    pub query_sets: StatelessTracker<A, resource::QuerySet<A>, id::QuerySetId>,
}

impl<A: HalApi> RenderBundleScope<A> {
    /// Create the render bundle scope and pull the maximum IDs from the hubs.
    pub fn new(
        buffers: &storage::Storage<resource::Buffer<A>, id::BufferId>,
        textures: &storage::Storage<resource::Texture<A>, id::TextureId>,
        bind_groups: &storage::Storage<binding_model::BindGroup<A>, id::BindGroupId>,
        render_pipelines: &storage::Storage<pipeline::RenderPipeline<A>, id::RenderPipelineId>,
        query_sets: &storage::Storage<resource::QuerySet<A>, id::QuerySetId>,
    ) -> Self {
        let mut value = Self {
            buffers: BufferUsageScope::new(),
            textures: TextureUsageScope::new(),
            bind_groups: StatelessTracker::new(),
            render_pipelines: StatelessTracker::new(),
            query_sets: StatelessTracker::new(),
        };

        // Size each tracker to the number of slots in its storage so that
        // indexing by resource index stays in bounds.
        value.buffers.set_size(buffers.len());
        value.textures.set_size(textures.len());
        value.bind_groups.set_size(bind_groups.len());
        value.render_pipelines.set_size(render_pipelines.len());
        value.query_sets.set_size(query_sets.len());

        value
    }

    /// Merge the inner contents of a bind group into the render bundle tracker.
    ///
    /// Only stateful things are merged in here, all other resources are owned
    /// indirectly by the bind group.
    ///
    /// # Safety
    ///
    /// The maximum ID given by each bind group resource must be less than the
    /// length of the storage given at the call to `new`.
    pub unsafe fn merge_bind_group(
        &mut self,
        textures: &storage::Storage<resource::Texture<A>, id::TextureId>,
        bind_group: &BindGroupStates<A>,
    ) -> Result<(), UsageConflict> {
        // SAFETY: forwarded to the caller — bind group resource IDs must be
        // within the sizes established in `new`.
        unsafe { self.buffers.merge_bind_group(&bind_group.buffers)? };
        unsafe {
            self.textures
                .merge_bind_group(textures, &bind_group.textures)?
        };

        Ok(())
    }
}
|
||||
|
||||
/// A usage scope tracker. Only needs to store stateful resources as stateless
/// resources cannot possibly have a usage conflict.
#[derive(Debug)]
pub(crate) struct UsageScope<A: HalApi> {
    pub buffers: BufferUsageScope<A>,
    pub textures: TextureUsageScope<A>,
}

impl<A: HalApi> UsageScope<A> {
    /// Create the usage scope and pull the maximum IDs from the hubs.
    pub fn new(
        buffers: &storage::Storage<resource::Buffer<A>, id::BufferId>,
        textures: &storage::Storage<resource::Texture<A>, id::TextureId>,
    ) -> Self {
        let mut value = Self {
            buffers: BufferUsageScope::new(),
            textures: TextureUsageScope::new(),
        };

        // Size each scope to the number of slots in its storage so that
        // indexing by resource index stays in bounds.
        value.buffers.set_size(buffers.len());
        value.textures.set_size(textures.len());

        value
    }

    /// Merge the inner contents of a bind group into the usage scope.
    ///
    /// Only stateful things are merged in here, all other resources are owned
    /// indirectly by the bind group.
    ///
    /// # Safety
    ///
    /// The maximum ID given by each bind group resource must be less than the
    /// length of the storage given at the call to `new`.
    pub unsafe fn merge_bind_group(
        &mut self,
        textures: &storage::Storage<resource::Texture<A>, id::TextureId>,
        bind_group: &BindGroupStates<A>,
    ) -> Result<(), UsageConflict> {
        // SAFETY: forwarded to the caller — bind group resource IDs must be
        // within the sizes established in `new`.
        unsafe {
            self.buffers.merge_bind_group(&bind_group.buffers)?;
            self.textures
                .merge_bind_group(textures, &bind_group.textures)?;
        }

        Ok(())
    }

    /// Merge the inner contents of a bind group into the usage scope.
    ///
    /// Only stateful things are merged in here, all other resources are owned
    /// indirectly by a bind group or are merged directly into the command buffer tracker.
    ///
    /// # Safety
    ///
    /// The maximum ID given by each bind group resource must be less than the
    /// length of the storage given at the call to `new`.
    pub unsafe fn merge_render_bundle(
        &mut self,
        textures: &storage::Storage<resource::Texture<A>, id::TextureId>,
        render_bundle: &RenderBundleScope<A>,
    ) -> Result<(), UsageConflict> {
        self.buffers.merge_usage_scope(&render_bundle.buffers)?;
        self.textures
            .merge_usage_scope(textures, &render_bundle.textures)?;

        Ok(())
    }
}
|
||||
|
||||
/// A full double sided tracker used by CommandBuffers and the Device.
|
||||
pub(crate) struct Tracker<A: HalApi> {
|
||||
pub buffers: BufferTracker<A>,
|
||||
pub textures: TextureTracker<A>,
|
||||
pub views: StatelessTracker<A, resource::TextureView<A>, id::TextureViewId>,
|
||||
pub samplers: StatelessTracker<A, resource::Sampler<A>, id::SamplerId>,
|
||||
pub bind_groups: StatelessTracker<A, binding_model::BindGroup<A>, id::BindGroupId>,
|
||||
pub compute_pipelines: StatelessTracker<A, pipeline::ComputePipeline<A>, id::ComputePipelineId>,
|
||||
pub render_pipelines: StatelessTracker<A, pipeline::RenderPipeline<A>, id::RenderPipelineId>,
|
||||
pub bundles: StatelessTracker<A, command::RenderBundle<A>, id::RenderBundleId>,
|
||||
pub query_sets: StatelessTracker<A, resource::QuerySet<A>, id::QuerySetId>,
|
||||
}
|
||||
|
||||
impl<A: HalApi> Tracker<A> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
buffers: BufferTracker::new(),
|
||||
textures: TextureTracker::new(),
|
||||
views: StatelessTracker::new(),
|
||||
samplers: StatelessTracker::new(),
|
||||
bind_groups: StatelessTracker::new(),
|
||||
compute_pipelines: StatelessTracker::new(),
|
||||
render_pipelines: StatelessTracker::new(),
|
||||
bundles: StatelessTracker::new(),
|
||||
query_sets: StatelessTracker::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Pull the maximum IDs from the hubs.
|
||||
pub fn set_size(
|
||||
&mut self,
|
||||
buffers: Option<&storage::Storage<resource::Buffer<A>, id::BufferId>>,
|
||||
textures: Option<&storage::Storage<resource::Texture<A>, id::TextureId>>,
|
||||
views: Option<&storage::Storage<resource::TextureView<A>, id::TextureViewId>>,
|
||||
samplers: Option<&storage::Storage<resource::Sampler<A>, id::SamplerId>>,
|
||||
bind_groups: Option<&storage::Storage<binding_model::BindGroup<A>, id::BindGroupId>>,
|
||||
compute_pipelines: Option<
|
||||
&storage::Storage<pipeline::ComputePipeline<A>, id::ComputePipelineId>,
|
||||
>,
|
||||
render_pipelines: Option<
|
||||
&storage::Storage<pipeline::RenderPipeline<A>, id::RenderPipelineId>,
|
||||
>,
|
||||
bundles: Option<&storage::Storage<command::RenderBundle<A>, id::RenderBundleId>>,
|
||||
query_sets: Option<&storage::Storage<resource::QuerySet<A>, id::QuerySetId>>,
|
||||
) {
|
||||
if let Some(buffers) = buffers {
|
||||
self.buffers.set_size(buffers.len());
|
||||
};
|
||||
if let Some(textures) = textures {
|
||||
self.textures.set_size(textures.len());
|
||||
};
|
||||
if let Some(views) = views {
|
||||
self.views.set_size(views.len());
|
||||
};
|
||||
if let Some(samplers) = samplers {
|
||||
self.samplers.set_size(samplers.len());
|
||||
};
|
||||
if let Some(bind_groups) = bind_groups {
|
||||
self.bind_groups.set_size(bind_groups.len());
|
||||
};
|
||||
if let Some(compute_pipelines) = compute_pipelines {
|
||||
self.compute_pipelines.set_size(compute_pipelines.len());
|
||||
}
|
||||
if let Some(render_pipelines) = render_pipelines {
|
||||
self.render_pipelines.set_size(render_pipelines.len());
|
||||
};
|
||||
if let Some(bundles) = bundles {
|
||||
self.bundles.set_size(bundles.len());
|
||||
};
|
||||
if let Some(query_sets) = query_sets {
|
||||
self.query_sets.set_size(query_sets.len());
|
||||
};
|
||||
}
|
||||
|
||||
/// Iterates through all resources in the given bind group and adopts
/// the state given for those resources in the UsageScope. It also
/// removes all touched resources from the usage scope.
///
/// If a transition is needed to get the resources into the needed
/// state, those transitions are stored within the tracker. A
/// subsequent call to [`BufferTracker::drain`] or
/// [`TextureTracker::drain`] is needed to get those transitions.
///
/// This is a really funky method used by Compute Passes to generate
/// barriers after a call to dispatch without needing to iterate
/// over all elements in the usage scope. We use the
/// bind group as a source of which IDs to look at. The bind groups
/// must have first been added to the usage scope.
///
/// Only stateful things are merged in here, all other resources are owned
/// indirectly by the bind group.
///
/// # Safety
///
/// The maximum ID given by each bind group resource must be less than the
/// value given to `set_size`
pub unsafe fn set_and_remove_from_usage_scope_sparse(
    &mut self,
    textures: &storage::Storage<resource::Texture<A>, id::TextureId>,
    scope: &mut UsageScope<A>,
    bind_group: &BindGroupStates<A>,
) {
    // SAFETY: the caller guarantees (see `# Safety` above) that every
    // buffer ID used by `bind_group` is below the size given to `set_size`.
    unsafe {
        self.buffers.set_and_remove_from_usage_scope_sparse(
            &mut scope.buffers,
            bind_group.buffers.used(),
        )
    };
    // SAFETY: same ID-bound guarantee as above, for the texture IDs.
    unsafe {
        self.textures.set_and_remove_from_usage_scope_sparse(
            textures,
            &mut scope.textures,
            &bind_group.textures,
        )
    };
}
|
||||
|
||||
/// Tracks the stateless resources from the given renderbundle. It is expected
/// that the stateful resources will get merged into a usage scope first.
///
/// # Safety
///
/// The maximum ID given by each bind group resource must be less than the
/// value given to `set_size`
pub unsafe fn add_from_render_bundle(
    &mut self,
    render_bundle: &RenderBundleScope<A>,
) -> Result<(), UsageConflict> {
    // NOTE(review): this body has no failure path — stateless merges below
    // cannot conflict — so `Ok(())` is always returned. The `Result` return
    // type presumably mirrors the stateful merge APIs; confirm with callers.
    self.bind_groups
        .add_from_tracker(&render_bundle.bind_groups);
    self.render_pipelines
        .add_from_tracker(&render_bundle.render_pipelines);
    self.query_sets.add_from_tracker(&render_bundle.query_sets);

    Ok(())
}
|
||||
}
|
||||
206
third-party/vendor/wgpu-core/src/track/range.rs
vendored
Normal file
206
third-party/vendor/wgpu-core/src/track/range.rs
vendored
Normal file
|
|
@ -0,0 +1,206 @@
|
|||
//Note: this could be the only place where we need `SmallVec`.
|
||||
//TODO: consider getting rid of it.
|
||||
use smallvec::SmallVec;
|
||||
|
||||
use std::{fmt::Debug, iter, ops::Range};
|
||||
|
||||
/// Structure that keeps track of a I -> T mapping,
/// optimized for a case where keys of the same values
/// are often grouped together linearly.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct RangedStates<I, T> {
    /// List of ranges, each associated with a single value.
    /// Ranges of keys have to be non-intersecting and ordered.
    ranges: SmallVec<[(Range<I>, T); 1]>,
}
|
||||
|
||||
impl<I: Copy + Ord, T: Copy + PartialEq> RangedStates<I, T> {
|
||||
pub fn from_range(range: Range<I>, value: T) -> Self {
|
||||
Self {
|
||||
ranges: iter::once((range, value)).collect(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct a new instance from a slice of ranges.
#[cfg(test)]
pub fn from_slice(values: &[(Range<I>, T)]) -> Self {
    let mut ranges = SmallVec::new();
    ranges.extend(values.iter().cloned());
    Self { ranges }
}
|
||||
|
||||
/// Iterates over all stored `(range, value)` pairs, in key order.
pub fn iter(&self) -> impl Iterator<Item = &(Range<I>, T)> + Clone {
    self.ranges.iter()
}

/// Mutably iterates over all stored `(range, value)` pairs, in key order.
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (Range<I>, T)> {
    self.ranges.iter_mut()
}
|
||||
|
||||
/// Check that all the ranges are non-intersecting and ordered.
/// Panics otherwise.
#[cfg(test)]
fn check_sanity(&self) {
    // Every individual range must be non-empty.
    for entry in self.ranges.iter() {
        assert!(entry.0.start < entry.0.end);
    }
    // Each consecutive pair must be ordered and non-overlapping.
    for pair in self.ranges.windows(2) {
        assert!(pair[0].0.end <= pair[1].0.start);
    }
}
|
||||
|
||||
/// Merge the neighboring ranges together, where possible.
pub fn coalesce(&mut self) {
    let mut num_removed = 0;
    let mut iter = self.ranges.iter_mut();
    // `cur` is the last surviving entry; start with the first one, if any.
    let mut cur = match iter.next() {
        Some(elem) => elem,
        None => return,
    };
    for next in iter {
        if cur.0.end == next.0.start && cur.1 == next.1 {
            // Adjacent ranges carrying equal values: grow `cur` to absorb
            // `next`, then mark `next` as empty (start == end) so the
            // retain pass below can drop it.
            num_removed += 1;
            cur.0.end = next.0.end;
            next.0.end = next.0.start;
        } else {
            cur = next;
        }
    }
    // Remove all entries emptied above in one pass.
    if num_removed != 0 {
        self.ranges.retain(|pair| pair.0.start != pair.0.end);
    }
}
|
||||
|
||||
pub fn iter_filter<'a>(
|
||||
&'a self,
|
||||
range: &'a Range<I>,
|
||||
) -> impl Iterator<Item = (Range<I>, &T)> + 'a {
|
||||
self.ranges
|
||||
.iter()
|
||||
.filter(move |&&(ref inner, ..)| inner.end > range.start && inner.start < range.end)
|
||||
.map(move |&(ref inner, ref v)| {
|
||||
let new_range = inner.start.max(range.start)..inner.end.min(range.end);
|
||||
|
||||
(new_range, v)
|
||||
})
|
||||
}
|
||||
|
||||
/// Split the storage ranges in such a way that there is a linear subset of
/// them occupying exactly `index` range, which is returned mutably.
///
/// Gaps in the ranges are filled with `default` value.
pub fn isolate(&mut self, index: &Range<I>, default: T) -> &mut [(Range<I>, T)] {
    //TODO: implement this in 2 passes:
    // 1. scan the ranges to figure out how many extra ones need to be inserted
    // 2. go through the ranges by moving them to the right and inserting the missing ones

    // Find the first stored range that ends after `index` starts. If none
    // does, the entire `index` window lies past our data: append one entry.
    let mut start_pos = match self.ranges.iter().position(|pair| pair.0.end > index.start) {
        Some(pos) => pos,
        None => {
            let pos = self.ranges.len();
            self.ranges.push((index.clone(), default));
            return &mut self.ranges[pos..];
        }
    };

    {
        // If the first overlapping range sticks out to the left of `index`,
        // split it so the isolated region begins exactly at `index.start`.
        let (range, value) = self.ranges[start_pos].clone();
        if range.start < index.start {
            self.ranges[start_pos].0.start = index.start;
            self.ranges
                .insert(start_pos, (range.start..index.start, value));
            start_pos += 1;
        }
    }
    // Walk rightwards: fill gaps with `default`, split the range that
    // crosses `index.end`, stop once the window is fully covered.
    let mut pos = start_pos;
    let mut range_pos = index.start;
    loop {
        let (range, value) = self.ranges[pos].clone();
        // Current range starts at/after the window end: fill the
        // remaining gap and finish.
        if range.start >= index.end {
            self.ranges.insert(pos, (range_pos..index.end, default));
            pos += 1;
            break;
        }
        // Gap between the covered prefix and this range: fill it.
        if range.start > range_pos {
            self.ranges.insert(pos, (range_pos..range.start, default));
            pos += 1;
            range_pos = range.start;
        }
        // This range reaches (or crosses) the window end: split off the
        // tail that lies beyond `index.end`, if any, and finish.
        if range.end >= index.end {
            if range.end != index.end {
                self.ranges[pos].0.start = index.end;
                self.ranges.insert(pos, (range_pos..index.end, value));
            }
            pos += 1;
            break;
        }
        pos += 1;
        range_pos = range.end;
        // Ran out of stored ranges: fill up to the window end and finish.
        if pos == self.ranges.len() {
            self.ranges.push((range_pos..index.end, default));
            pos += 1;
            break;
        }
    }

    &mut self.ranges[start_pos..pos]
}
|
||||
|
||||
/// Helper method for isolation that checks the sanity of the results.
#[cfg(test)]
pub fn sanely_isolated(&self, index: Range<I>, default: T) -> Vec<(Range<I>, T)> {
    // Work on a copy so `self` is left untouched for the caller.
    let mut working = self.clone();
    let isolated = working.isolate(&index, default).to_vec();
    working.check_sanity();
    isolated
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    //TODO: randomized/fuzzy testing
    use super::RangedStates;

    // Touching-but-not-overlapping ranges pass the sanity check.
    #[test]
    fn sane_good() {
        let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9)]);
        rs.check_sanity();
    }

    // An empty range (5..5) must be rejected.
    #[test]
    #[should_panic]
    fn sane_empty() {
        let rs = RangedStates::from_slice(&[(1..4, 9u8), (5..5, 9)]);
        rs.check_sanity();
    }

    // Overlapping ranges (1..4 vs 3..5) must be rejected.
    #[test]
    #[should_panic]
    fn sane_intersect() {
        let rs = RangedStates::from_slice(&[(1..4, 9u8), (3..5, 9)]);
        rs.check_sanity();
    }

    // Adjacent equal-valued ranges merge; value changes and gaps keep
    // entries separate.
    #[test]
    fn coalesce() {
        let mut rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
        rs.coalesce();
        rs.check_sanity();
        assert_eq!(rs.ranges.as_slice(), &[(1..5, 9), (5..7, 1), (8..9, 1),]);
    }

    // `isolate` splits and gap-fills so a contiguous run of entries covers
    // exactly the requested window.
    #[test]
    fn isolate() {
        let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
        assert_eq!(&rs.sanely_isolated(4..5, 0), &[(4..5, 9u8),]);
        assert_eq!(
            &rs.sanely_isolated(0..6, 0),
            &[(0..1, 0), (1..4, 9u8), (4..5, 9), (5..6, 1),]
        );
        assert_eq!(&rs.sanely_isolated(8..10, 1), &[(8..9, 1), (9..10, 1),]);
        assert_eq!(
            &rs.sanely_isolated(6..9, 0),
            &[(6..7, 1), (7..8, 0), (8..9, 1),]
        );
    }
}
|
||||
203
third-party/vendor/wgpu-core/src/track/stateless.rs
vendored
Normal file
203
third-party/vendor/wgpu-core/src/track/stateless.rs
vendored
Normal file
|
|
@ -0,0 +1,203 @@
|
|||
/*! Stateless Trackers
|
||||
*
|
||||
* Stateless trackers don't have any state, so make no
|
||||
* distinction between a usage scope and a full tracker.
|
||||
!*/
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::{
|
||||
hal_api::HalApi,
|
||||
id::{TypedId, Valid},
|
||||
resource, storage,
|
||||
track::ResourceMetadata,
|
||||
RefCount,
|
||||
};
|
||||
|
||||
/// Stores all the resources that a bind group stores.
// NOTE(review): the type name carries the upstream "Sate" typo; it is kept
// because renaming would break every caller of this vendored API.
pub(crate) struct StatelessBindGroupSate<T, Id: TypedId> {
    // Each tracked resource ID, paired with a ref-count that keeps the
    // resource alive while the bind group holds it.
    resources: Vec<(Valid<Id>, RefCount)>,

    // Ties the otherwise-unused resource type `T` to the struct.
    _phantom: PhantomData<T>,
}
|
||||
|
||||
impl<T: resource::Resource, Id: TypedId> StatelessBindGroupSate<T, Id> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
resources: Vec::new(),
|
||||
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Optimize the buffer bind group state by sorting it by ID.
|
||||
///
|
||||
/// When this list of states is merged into a tracker, the memory
|
||||
/// accesses will be in a constant assending order.
|
||||
pub(crate) fn optimize(&mut self) {
|
||||
self.resources
|
||||
.sort_unstable_by_key(|&(id, _)| id.0.unzip().0);
|
||||
}
|
||||
|
||||
/// Returns a list of all resources tracked. May contain duplicates.
|
||||
pub fn used(&self) -> impl Iterator<Item = Valid<Id>> + '_ {
|
||||
self.resources.iter().map(|&(id, _)| id)
|
||||
}
|
||||
|
||||
/// Adds the given resource.
|
||||
pub fn add_single<'a>(
|
||||
&mut self,
|
||||
storage: &'a storage::Storage<T, Id>,
|
||||
id: Id,
|
||||
) -> Option<&'a T> {
|
||||
let resource = storage.get(id).ok()?;
|
||||
|
||||
self.resources
|
||||
.push((Valid(id), resource.life_guard().add_ref()));
|
||||
|
||||
Some(resource)
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores all resource state within a command buffer or device.
pub(crate) struct StatelessTracker<A: HalApi, T, Id: TypedId> {
    // Index-addressed ownership table: which resource slots this tracker
    // holds, with their epochs and ref-counts.
    metadata: ResourceMetadata<A>,

    // Ties the otherwise-unused resource/ID type parameters to the struct.
    _phantom: PhantomData<(T, Id)>,
}
|
||||
|
||||
impl<A: HalApi, T: resource::Resource, Id: TypedId> StatelessTracker<A, T, Id> {
|
||||
/// Creates an empty tracker.
pub fn new() -> Self {
    Self {
        metadata: ResourceMetadata::new(),

        _phantom: PhantomData,
    }
}
|
||||
|
||||
/// Debug-asserts that `index` is a valid slot in the metadata table.
fn tracker_assert_in_bounds(&self, index: usize) {
    self.metadata.tracker_assert_in_bounds(index);
}
|
||||
|
||||
/// Sets the size of all the vectors inside the tracker.
///
/// Must be called with the highest possible Resource ID of this type
/// before all unsafe functions are called.
pub fn set_size(&mut self, size: usize) {
    self.metadata.set_size(size);
}
|
||||
|
||||
/// Extend the vectors to let the given index be valid.
|
||||
fn allow_index(&mut self, index: usize) {
|
||||
if index >= self.metadata.size() {
|
||||
self.set_size(index + 1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a list of all resources tracked.
pub fn used(&self) -> impl Iterator<Item = Valid<Id>> + '_ {
    self.metadata.owned_ids()
}
|
||||
|
||||
/// Inserts a single resource into the resource tracker.
|
||||
///
|
||||
/// If the resource already exists in the tracker, it will be overwritten.
|
||||
///
|
||||
/// If the ID is higher than the length of internal vectors,
|
||||
/// the vectors will be extended. A call to set_size is not needed.
|
||||
pub fn insert_single(&mut self, id: Valid<Id>, ref_count: RefCount) {
|
||||
let (index32, epoch, _) = id.0.unzip();
|
||||
let index = index32 as usize;
|
||||
|
||||
self.allow_index(index);
|
||||
|
||||
self.tracker_assert_in_bounds(index);
|
||||
|
||||
unsafe {
|
||||
self.metadata.insert(index, epoch, ref_count);
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds the given resource to the tracker.
|
||||
///
|
||||
/// If the ID is higher than the length of internal vectors,
|
||||
/// the vectors will be extended. A call to set_size is not needed.
|
||||
pub fn add_single<'a>(
|
||||
&mut self,
|
||||
storage: &'a storage::Storage<T, Id>,
|
||||
id: Id,
|
||||
) -> Option<&'a T> {
|
||||
let item = storage.get(id).ok()?;
|
||||
|
||||
let (index32, epoch, _) = id.unzip();
|
||||
let index = index32 as usize;
|
||||
|
||||
self.allow_index(index);
|
||||
|
||||
self.tracker_assert_in_bounds(index);
|
||||
|
||||
unsafe {
|
||||
self.metadata
|
||||
.insert(index, epoch, item.life_guard().add_ref());
|
||||
}
|
||||
|
||||
Some(item)
|
||||
}
|
||||
|
||||
/// Adds the given resources from the given tracker.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn add_from_tracker(&mut self, other: &Self) {
    // Grow to cover every slot the other tracker may own.
    let incoming_size = other.metadata.size();
    if incoming_size > self.metadata.size() {
        self.set_size(incoming_size);
    }

    for index in other.metadata.owned_indices() {
        self.tracker_assert_in_bounds(index);
        other.tracker_assert_in_bounds(index);
        // SAFETY: `index` comes from `other`'s owned set, and both metadata
        // tables were sized to at least `incoming_size` above, so it is in
        // bounds for both.
        unsafe {
            let previously_owned = self.metadata.contains_unchecked(index);

            // Keep our existing entry (epoch and ref-count) when we already
            // track this slot; otherwise copy the other tracker's entry.
            if !previously_owned {
                let epoch = other.metadata.get_epoch_unchecked(index);
                let other_ref_count = other.metadata.get_ref_count_unchecked(index);
                self.metadata.insert(index, epoch, other_ref_count.clone());
            }
        }
    }
}
|
||||
|
||||
/// Removes the given resource from the tracker iff we have the last reference to the
|
||||
/// resource and the epoch matches.
|
||||
///
|
||||
/// Returns true if the resource was removed.
|
||||
///
|
||||
/// If the ID is higher than the length of internal vectors,
|
||||
/// false will be returned.
|
||||
pub fn remove_abandoned(&mut self, id: Valid<Id>) -> bool {
|
||||
let (index32, epoch, _) = id.0.unzip();
|
||||
let index = index32 as usize;
|
||||
|
||||
if index > self.metadata.size() {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.tracker_assert_in_bounds(index);
|
||||
|
||||
unsafe {
|
||||
if self.metadata.contains_unchecked(index) {
|
||||
let existing_epoch = self.metadata.get_epoch_unchecked(index);
|
||||
let existing_ref_count = self.metadata.get_ref_count_unchecked(index);
|
||||
|
||||
if existing_epoch == epoch && existing_ref_count.load() == 1 {
|
||||
self.metadata.remove(index);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
}
|
||||
1552
third-party/vendor/wgpu-core/src/track/texture.rs
vendored
Normal file
1552
third-party/vendor/wgpu-core/src/track/texture.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
1308
third-party/vendor/wgpu-core/src/validation.rs
vendored
Normal file
1308
third-party/vendor/wgpu-core/src/validation.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
Loading…
Add table
Add a link
Reference in a new issue