Vendor things
This commit is contained in:
parent
5deceec006
commit
977e3c17e5
19434 changed files with 10682014 additions and 0 deletions
1
third-party/vendor/png/.cargo-checksum.json
vendored
Normal file
1
third-party/vendor/png/.cargo-checksum.json
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"files":{"CHANGES.md":"a53250005c1da36d801ad997b8ce14f3814c741d136d5eebf9ebb2b3cf826b61","Cargo.lock":"e1b4a061845139e1d54a0209c68b92ef1825d5f886298eec94cfb153254d0a53","Cargo.toml":"c15f47bc0da06ef4246a813c9171bcb8a41aee1a77c3ca385e8c4ce90179ad9a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"eaf40297c75da471f7cda1f3458e8d91b4b2ec866e609527a13acfa93b638652","README.md":"a87e2bc972068409cff0665241349c4eb72291b33d12ed97a09f97e9a2560655","benches/README.md":"0c60c3d497abdf6c032863aa47da41bc6bb4f5ff696d45dec0e6eb33459b14b0","benches/decoder.rs":"a54832d82a48bba09b588456c67b3d4f9202eab213ee47c65de0b87b00b65b44","benches/expand_paletted.rs":"945123a752835f13a4cc1c82470602183f9613c71b1e81f6a8096e2bb227423b","benches/unfilter.rs":"413392304afaf6e016924b582dd6dc0d39aa9e39acdcdec52cbd45326f92bfa2","examples/change-png-info.rs":"7cb5627bc8701a5acb28990bc9c98443d5a6edf73020c70bf3e73f176d4804c2","examples/corpus-bench.rs":"c1fcbf01b188b021798ae8406ee6fbe0678fbd46f2ffe7dfa743e6001cd28b9e","examples/png-generate.rs":"e4cca06b9cc3291b52261d88a2e016940620b613fc1402bb54ccc68f73026a96","examples/pngcheck.rs":"7a5cb4cbb4d166f4337ff69a9e4b16783dce482a56ca56a709bf636d8f3bb981","examples/show.rs":"5c4d5231bafca9c6c01d930792b69f30c659eb6214d78f55f649ed2ca1880ae0","src/adam7.rs":"62851c15b0591004a9add076a349d1c0f08d8555eb21211cd4a620f8fa178a7c","src/benchable_apis.rs":"bb5176efe8b44ed9d9edab7059a53cfba8e384e884517eaba1f4a7597641121f","src/chunk.rs":"eff04345e9af621ce51a9141f35657262ee1a891e724332a80ac40eec90a2c45","src/common.rs":"98e14532e8fb83aa744fcfd321cbaf47f99f29bbc3b0b59b009634012b42af4a","src/decoder/mod.rs":"4f6b7ddafc01e4e84afb216ba0995d94db6a216deb31b88d269675eb4feefe3d","src/decoder/stream.rs":"93c403bb9f008e2efe9f50733908e1b9f1be2f9f168d5fbe6caed660d84e2f85","src/decoder/transform.rs":"250058f36f63a3b4fe46f5f777359f24e4328d2767f7341f337df08d7e9a2481","src/decoder/transform/palette.rs":"f0d2e3de33d4fe26135f1ebcaeaebf2ffc490d
e8766bed254a975552aa44576b","src/decoder/zlib.rs":"f76341b9ec50a9cb0e8326fdea26ef967f09a4700761d135675328bc1af32fce","src/encoder.rs":"69e3e12aef222f7a01bf60af5f1c6bbd1b016acbf338eac2b084b22869595c1d","src/filter.rs":"48174721ad77b9319bd996cbb562df97b912cde058addfe7b46b04bd6c34d834","src/lib.rs":"586f0f5fe369f4be7f0e363d41c3adba06abe45115b51e8afd214bbe994d9d33","src/srgb.rs":"da1609902064016853410633926d316b5289d4bbe1fa469b21f116c1c1b2c18e","src/test_utils.rs":"4b25339d91ef71ce1bb67d3f68d742a41c64c11042247fe9dfa231457642788f","src/text_metadata.rs":"b73fec5d6009b08374eb137dbf906d5d75792f5a6c7249d601bdfc51c31f5749","src/traits.rs":"79d357244e493f5174ca11873b0d5c443fd4a5e6e1f7c6df400a1767c5ad05b2"},"package":"06e4b0d3d1312775e782c86c91a111aa1f910cbb65e1337f9975b5f9a554b5e1"}
|
||||
193
third-party/vendor/png/CHANGES.md
vendored
Normal file
193
third-party/vendor/png/CHANGES.md
vendored
Normal file
|
|
@ -0,0 +1,193 @@
|
|||
## Unreleased
|
||||
|
||||
## 0.17.13
|
||||
|
||||
* Fix `Send` bound on `Reader`.
|
||||
|
||||
## 0.17.12
|
||||
|
||||
* Reject zero-sized frames.
|
||||
* Optimized decoding of paletted images.
|
||||
* Removed remaining uses of miniz_oxide for decoding.
|
||||
* Correct lifetime used for `Info` struct.
|
||||
* Fix build issue with `-Z minimal-versions`.
|
||||
|
||||
## 0.17.11
|
||||
|
||||
* Ignore subsequent iCCP chunks to match libpng behavior.
|
||||
* Added an option to ignore ancillary chunks with invalid CRC.
|
||||
* Added `new_with_info` constructor for encoder.
|
||||
* Removed hard-coded memory limits.
|
||||
* No longer allow zero sized images.
|
||||
* Added `Reader::finish` to read all the auxillary chunks that comes after the
|
||||
image.
|
||||
|
||||
## 0.17.10
|
||||
|
||||
* Added Transformations::ALPHA
|
||||
* Enable encoding pixel dimensions
|
||||
|
||||
## 0.17.9
|
||||
|
||||
* Fixed a bug in ICC profile decompression.
|
||||
* Improved unfilter performance.
|
||||
|
||||
## 0.17.8
|
||||
|
||||
* Increased MSRV to 1.57.0.
|
||||
* Substantially optimized encoding and decoding:
|
||||
- Autovectorize filtering and unfiltering.
|
||||
- Make the "fast" compression preset use fdeflate.
|
||||
- Switch decompression to always use fdeflate.
|
||||
- Updated to miniz_oxide 0.7.
|
||||
- Added an option to ignore checksums.
|
||||
* Added corpus-bench example which measures the compression ratio and time to
|
||||
re-encode and subsequently decode a corpus of images.
|
||||
* More fuzz testing.
|
||||
|
||||
## 0.17.7
|
||||
|
||||
* Fixed handling broken tRNS chunk.
|
||||
* Updated to miniz_oxide 0.6.
|
||||
|
||||
## 0.17.6
|
||||
|
||||
* Added `Decoder::read_header_info` to query the information contained in the
|
||||
PNG header.
|
||||
* Switched to using the flate2 crate for encoding.
|
||||
|
||||
## 0.17.5
|
||||
|
||||
* Fixed a regression, introduced by chunk validation, that made the decoder
|
||||
sensitive to the order of `gAMA`, `cHRM`, and `sRGB` chunks.
|
||||
|
||||
## 0.17.4
|
||||
|
||||
* Added `{Decoder,StreamDecoder}::set_ignore_text_chunk` to disable decoding of
|
||||
ancillary text chunks during the decoding process (chunks decoded by default).
|
||||
* Added duplicate chunk checks. The decoder now enforces that standard chunks
|
||||
such as palette, gamma, … occur at most once as specified.
|
||||
* Added `#[forbid(unsafe_code)]` again. This may come at a minor performance
|
||||
cost when decoding ASCII text for now.
|
||||
* Fixed a bug where decoding of large chunks (>32kB) failed to produce the
|
||||
correct result, or fail the image decoding. As new chunk types are decoded
|
||||
this introduced regressions relative to previous versions.
|
||||
|
||||
## 0.17.3
|
||||
|
||||
* Fixed a bug where `Writer::finish` would not drop the underlying writer. This
|
||||
would fail to flush and leak memory when using a buffered file writers.
|
||||
* Calling `Writer::finish` will now eagerly flush the underlying writer,
|
||||
returning any error that this operation may result in.
|
||||
* Errors in inflate are now diagnosed with more details.
|
||||
* The color and depth combination is now checked in stream decoder.
|
||||
|
||||
## 0.17.2
|
||||
|
||||
* Added support for encoding and decoding tEXt/zTXt/iTXt chunks.
|
||||
* Added `Encoder::validate_sequence` to enable validation of the written frame
|
||||
sequence, that is, if the number of written images is consistent with the
|
||||
animation state.
|
||||
* Validation is now off by default. The basis of the new validation had been
|
||||
introduced in 0.17 but this fixes some cases where this validation was too
|
||||
aggressive compared to previous versions.
|
||||
* Added `Writer::finish` to fully check the write of the end of an image
|
||||
instead of silently ignoring potential errors in `Drop`.
|
||||
* The `Writer::write_chunk` method now validates that the computed chunk length
|
||||
does not overflow the limit set by PNG.
|
||||
* Fix an issue where the library would panic or even abort the process when
|
||||
`flush` or `write` of an underlying writer panicked, or in some other uses of
|
||||
`StreamWriter`.
|
||||
|
||||
## 0.17.1
|
||||
|
||||
* Fix panic in adaptive filter method `sum_buffer`
|
||||
|
||||
## 0.17.0
|
||||
|
||||
* Increased MSRV to 1.46.0
|
||||
* Rework output info usage
|
||||
* Implement APNG encoding
|
||||
* Improve ergonomics of encoder set_palette and set_trns methods
|
||||
* Make Info struct non-exhaustive
|
||||
* Make encoder a core feature
|
||||
* Default Transformations to Identity
|
||||
* Add Adaptive filtering method for encoding
|
||||
* Fix SCREAM_CASE on ColorType variants
|
||||
* Forbid unsafe code
|
||||
|
||||
## 0.16.7
|
||||
|
||||
* Added `Encoder::set_trns` to register a transparency table to be written.
|
||||
|
||||
## 0.16.6
|
||||
|
||||
* Fixed silent integer overflows in buffer size calculation, resulting in
|
||||
panics from assertions and out-of-bounds accesses when actually decoding.
|
||||
This improves the stability of 32-bit and 16-bit targets and make decoding
|
||||
run as stable as on 64-bit.
|
||||
* Reject invalid color/depth combinations. Some would lead to mismatched output
|
||||
buffer size and panics during decoding.
|
||||
* Add `Clone` impl for `Info` struct.
|
||||
|
||||
## 0.16.5
|
||||
|
||||
* Decoding of APNG subframes is now officially supported and specified. Note
|
||||
that dispose ops and positioning in the image need to be done by the caller.
|
||||
* Added encoding of indexed data.
|
||||
* Switched to `miniz_oxide` for decompressing image data, with 30%-50% speedup
|
||||
in common cases and up to 200% in special ones.
|
||||
* Fix accepting images only with consecutive IDAT chunks, rules out data loss.
|
||||
|
||||
## 0.16.4
|
||||
|
||||
* The fdAT frames are no longer inspected when the main image is read. This
|
||||
would previously be the case for non-interlaced images. This would lead to
|
||||
incorrect failure and, e.g. an error of the form `"invalid filter method"`.
|
||||
* Fix always validating the last IDAT-chunks checksum, was sometimes ignored.
|
||||
* Prevent encoding color/bit-depth combinations forbidden by the specification.
|
||||
* The fixes for APNG/fdAT enable further implementation. The _next_ release is
|
||||
expected to officially support APNG.
|
||||
|
||||
## 0.16.3
|
||||
|
||||
* Fix encoding with filtering methods Up, Avg, Paeth
|
||||
* Optimize decoding throughput by up to +30%
|
||||
|
||||
## 0.16.2
|
||||
|
||||
* Added method constructing an owned stream encoder.
|
||||
|
||||
## 0.16.1
|
||||
|
||||
* Addressed files bloating the packed crate
|
||||
|
||||
## 0.16.0
|
||||
|
||||
* Fix a bug compressing images with deflate
|
||||
* Address use of deprecated error interfaces
|
||||
|
||||
## 0.15.3
|
||||
|
||||
* Fix panic while trying to encode empty images. Such images are no longer
|
||||
accepted and error when calling `write_header` before any data has been
|
||||
written. The specification does not permit empty images.
|
||||
|
||||
## 0.15.2
|
||||
|
||||
* Fix `EXPAND` transformation to leave bit depths above 8 unchanged
|
||||
|
||||
## 0.15.1
|
||||
|
||||
* Fix encoding writing invalid chunks. Images written can be corrected: see
|
||||
https://github.com/image-rs/image/issues/1074 for a recovery.
|
||||
* Fix a panic in bit unpacking with checked arithmetic (e.g. in debug builds)
|
||||
* Added better fuzzer integration
|
||||
* Update `term`, `rand` dev-dependency
|
||||
* Note: The `show` example program requires a newer compiler than 1.34.2 on
|
||||
some targets due to depending on `glium`. This is not considered a breaking
|
||||
bug.
|
||||
|
||||
## 0.15
|
||||
|
||||
Begin of changelog
|
||||
2109
third-party/vendor/png/Cargo.lock
generated
vendored
Normal file
2109
third-party/vendor/png/Cargo.lock
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
95
third-party/vendor/png/Cargo.toml
vendored
Normal file
95
third-party/vendor/png/Cargo.toml
vendored
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
rust-version = "1.57"
|
||||
name = "png"
|
||||
version = "0.17.13"
|
||||
authors = ["The image-rs Developers"]
|
||||
include = [
|
||||
"/LICENSE-MIT",
|
||||
"/LICENSE-APACHE",
|
||||
"/README.md",
|
||||
"/CHANGES.md",
|
||||
"/src/",
|
||||
"/examples/",
|
||||
"/benches/",
|
||||
]
|
||||
description = "PNG decoding and encoding library in pure Rust"
|
||||
readme = "README.md"
|
||||
categories = ["multimedia::images"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/image-rs/image-png"
|
||||
|
||||
[[bench]]
|
||||
name = "decoder"
|
||||
path = "benches/decoder.rs"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "unfilter"
|
||||
path = "benches/unfilter.rs"
|
||||
harness = false
|
||||
required-features = ["benchmarks"]
|
||||
|
||||
[[bench]]
|
||||
name = "expand_paletted"
|
||||
path = "benches/expand_paletted.rs"
|
||||
harness = false
|
||||
required-features = ["benchmarks"]
|
||||
|
||||
[dependencies.bitflags]
|
||||
version = "1.0"
|
||||
|
||||
[dependencies.crc32fast]
|
||||
version = "1.2.0"
|
||||
|
||||
[dependencies.fdeflate]
|
||||
version = "0.3.3"
|
||||
|
||||
[dependencies.flate2]
|
||||
version = "1.0.11"
|
||||
|
||||
[dependencies.miniz_oxide]
|
||||
version = "0.7.1"
|
||||
features = ["simd"]
|
||||
|
||||
[dev-dependencies.byteorder]
|
||||
version = "1.5.0"
|
||||
|
||||
[dev-dependencies.clap]
|
||||
version = "3.0"
|
||||
features = ["derive"]
|
||||
|
||||
[dev-dependencies.criterion]
|
||||
version = "0.4.0"
|
||||
|
||||
[dev-dependencies.getopts]
|
||||
version = "0.2.14"
|
||||
|
||||
[dev-dependencies.glium]
|
||||
version = "0.32"
|
||||
features = ["glutin"]
|
||||
default-features = false
|
||||
|
||||
[dev-dependencies.glob]
|
||||
version = "0.3"
|
||||
|
||||
[dev-dependencies.rand]
|
||||
version = "0.8.4"
|
||||
|
||||
[dev-dependencies.term]
|
||||
version = "0.7"
|
||||
|
||||
[features]
|
||||
benchmarks = []
|
||||
unstable = []
|
||||
201
third-party/vendor/png/LICENSE-APACHE
vendored
Normal file
201
third-party/vendor/png/LICENSE-APACHE
vendored
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
25
third-party/vendor/png/LICENSE-MIT
vendored
Normal file
25
third-party/vendor/png/LICENSE-MIT
vendored
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2015 nwin
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
39
third-party/vendor/png/README.md
vendored
Normal file
39
third-party/vendor/png/README.md
vendored
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
# PNG Decoder/Encoder
|
||||
[](https://github.com/image-rs/image-png/actions)
|
||||
[](https://docs.rs/png)
|
||||
[](https://crates.io/crates/png)
|
||||

|
||||
[](https://github.com/image-rs/image-png)
|
||||
[](https://app.fuzzit.dev/orgs/image-rs/dashboard)
|
||||
|
||||
PNG decoder/encoder in pure Rust.
|
||||
|
||||
It contains all features required to handle the entirety of [the PngSuite by
|
||||
Willem van Schack][PngSuite].
|
||||
|
||||
[PngSuite]: http://www.schaik.com/pngsuite2011/pngsuite.html
|
||||
|
||||
## pngcheck
|
||||
|
||||
The `pngcheck` utility is a small demonstration binary that checks and prints
|
||||
metadata on every `.png` image provided via parameter. You can run it (for
|
||||
example on the test directories) with
|
||||
|
||||
```bash
|
||||
cargo run --release --example pngcheck ./tests/pngsuite/*
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Licensed under either of
|
||||
|
||||
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
|
||||
* MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
### Contribution
|
||||
|
||||
Unless you explicitly state otherwise, any contribution intentionally submitted
|
||||
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
|
||||
additional terms or conditions.
|
||||
6
third-party/vendor/png/benches/README.md
vendored
Normal file
6
third-party/vendor/png/benches/README.md
vendored
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
# Getting started with benchmarking
|
||||
|
||||
To run the benchmarks you need a nightly rust toolchain.
|
||||
Then you launch it with
|
||||
|
||||
rustup run nightly cargo bench --features=benchmarks
|
||||
81
third-party/vendor/png/benches/decoder.rs
vendored
Normal file
81
third-party/vendor/png/benches/decoder.rs
vendored
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
use std::fs;
|
||||
|
||||
use criterion::{
|
||||
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, Throughput,
|
||||
};
|
||||
use png::{Decoder, Reader, Transformations};
|
||||
|
||||
#[path = "../src/test_utils.rs"]
|
||||
mod test_utils;
|
||||
|
||||
fn load_all(c: &mut Criterion) {
|
||||
let mut g = c.benchmark_group("decode");
|
||||
for entry in fs::read_dir("tests/benches/").unwrap().flatten() {
|
||||
match entry.path().extension() {
|
||||
Some(st) if st == "png" => {}
|
||||
_ => continue,
|
||||
}
|
||||
|
||||
let data = fs::read(entry.path()).unwrap();
|
||||
bench_file(&mut g, data, entry.file_name().into_string().unwrap());
|
||||
}
|
||||
g.finish();
|
||||
|
||||
// Small IDATS
|
||||
let mut g = c.benchmark_group("generated-noncompressed-4k-idat");
|
||||
bench_noncompressed_png(&mut g, 8, 4096); // 256 B
|
||||
bench_noncompressed_png(&mut g, 128, 4096); // 64 KB
|
||||
bench_noncompressed_png(&mut g, 2048, 4096); // 16 MB
|
||||
bench_noncompressed_png(&mut g, 12288, 4096); // 576 MB
|
||||
g.finish();
|
||||
|
||||
// Normal IDATS
|
||||
let mut g = c.benchmark_group("generated-noncompressed-64k-idat");
|
||||
bench_noncompressed_png(&mut g, 128, 65536); // 64 KB
|
||||
bench_noncompressed_png(&mut g, 2048, 65536); // 16 MB
|
||||
bench_noncompressed_png(&mut g, 12288, 65536); // 576 MB
|
||||
g.finish();
|
||||
|
||||
// Large IDATS
|
||||
let mut g = c.benchmark_group("generated-noncompressed-2g-idat");
|
||||
bench_noncompressed_png(&mut g, 2048, 0x7fffffff); // 16 MB
|
||||
bench_noncompressed_png(&mut g, 12288, 0x7fffffff); // 576 MB
|
||||
g.finish();
|
||||
}
|
||||
|
||||
criterion_group! {benches, load_all}
|
||||
criterion_main!(benches);
|
||||
|
||||
fn bench_noncompressed_png(g: &mut BenchmarkGroup<WallTime>, size: u32, idat_bytes: usize) {
|
||||
let mut data = Vec::new();
|
||||
test_utils::write_noncompressed_png(&mut data, size, idat_bytes);
|
||||
bench_file(g, data, format!("{size}x{size}.png"));
|
||||
}
|
||||
|
||||
fn bench_file(g: &mut BenchmarkGroup<WallTime>, data: Vec<u8>, name: String) {
|
||||
if data.len() > 1_000_000 {
|
||||
g.sample_size(10);
|
||||
}
|
||||
|
||||
fn create_reader(data: &[u8]) -> Reader<&[u8]> {
|
||||
let mut decoder = Decoder::new(data);
|
||||
|
||||
// Cover default transformations used by the `image` crate when constructing
|
||||
// `image::codecs::png::PngDecoder`.
|
||||
decoder.set_transformations(Transformations::EXPAND);
|
||||
|
||||
decoder.read_info().unwrap()
|
||||
}
|
||||
|
||||
let mut reader = create_reader(data.as_slice());
|
||||
let mut image = vec![0; reader.output_buffer_size()];
|
||||
let info = reader.next_frame(&mut image).unwrap();
|
||||
|
||||
g.throughput(Throughput::Bytes(info.buffer_size() as u64));
|
||||
g.bench_with_input(name, &data, |b, data| {
|
||||
b.iter(|| {
|
||||
let mut reader = create_reader(data.as_slice());
|
||||
reader.next_frame(&mut image).unwrap();
|
||||
})
|
||||
});
|
||||
}
|
||||
155
third-party/vendor/png/benches/expand_paletted.rs
vendored
Normal file
155
third-party/vendor/png/benches/expand_paletted.rs
vendored
Normal file
|
|
@ -0,0 +1,155 @@
|
|||
//! Usage example:
|
||||
//!
|
||||
//! ```
|
||||
//! $ alias bench="rustup run nightly cargo bench"
|
||||
//! $ bench --bench=expand_paletted --features=benchmarks -- --save-baseline my_baseline
|
||||
//! ... tweak something ...
|
||||
//! $ bench --bench=expand_paletted --features=benchmarks -- --baseline my_baseline
|
||||
//! ```
|
||||
|
||||
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
|
||||
use png::benchable_apis::{create_info_from_plte_trns_bitdepth, create_transform_fn, TransformFn};
|
||||
use png::{Info, Transformations};
|
||||
use rand::Rng;
|
||||
use std::fmt::{self, Display};
|
||||
|
||||
/// Whether the synthetic palette input comes with a tRNS (transparency) chunk.
#[derive(Clone, Copy)]
enum TrnsPresence {
    Present,
    Absent,
}

impl Display for TrnsPresence {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let label = match self {
            TrnsPresence::Present => "trns=yes",
            TrnsPresence::Absent => "trns=no",
        };
        f.write_str(label)
    }
}
|
||||
|
||||
/// Registers every `expand_paletted` benchmark: execution benchmarks for each
/// combination of tRNS presence and source bit depth, plus three constructor
/// benchmarks with differently sized PLTE/tRNS chunks.
fn expand_paletted_all(c: &mut Criterion) {
    let trns_options = [TrnsPresence::Absent, TrnsPresence::Present];
    let bit_depths = [4, 8];

    // Size the input so that input + RGBA output + misc data together fit in a
    // typical 32 KiB L1 cache.
    let input_size = {
        let typical_l1_cache_size = 32 * 1024;
        let mut factor = 1; // input
        factor += 4; // RGBA output
        factor += 1; // other data
        typical_l1_cache_size / factor
    };

    for trns in trns_options.iter().copied() {
        for bit_depth in bit_depths.iter().copied() {
            bench_expand_palette(c, trns, bit_depth, input_size);
        }
    }

    bench_create_fn(c, 256, 256); // Full PLTE and trNS
    bench_create_fn(c, 224, 32); // Partial PLTE and trNS
    bench_create_fn(c, 16, 1); // Guess: typical for small images?
}
|
||||
|
||||
criterion_group!(benches, expand_paletted_all);
|
||||
criterion_main!(benches);
|
||||
|
||||
/// Returns `n` bytes of random data drawn from `rng`.
fn get_random_bytes<R: Rng>(rng: &mut R, n: usize) -> Vec<u8> {
    use rand::Fill;
    let mut result = vec![0u8; n];
    result.as_mut_slice().try_fill(rng).unwrap();
    result
}
|
||||
|
||||
/// Randomized input for the `expand_paletted` execution benchmarks.
struct Input {
    // Raw PLTE chunk contents: 3 bytes (RGB) per palette entry.
    palette: Vec<u8>,
    // Raw tRNS chunk contents (one alpha byte per covered index), if present.
    trns: Option<Vec<u8>>,
    // Palette-indexed image data to be expanded.
    src: Vec<u8>,
    // Bits per palette index in `src` (4 or 8 in these benchmarks).
    src_bit_depth: u8,
}

impl Input {
    /// Builds a random input of `input_size_in_bytes` indexed bytes.
    fn new(trns: TrnsPresence, src_bit_depth: u8, input_size_in_bytes: usize) -> Self {
        let mut rng = rand::thread_rng();

        // We provide RGB entries for 192 out of 256 possible indices and Alpha/Transparency
        // entries for 32 out of 256 possible indices. Rationale for these numbers:
        // * Oftentimes only a handful of colors at the edges of an icon need transparency
        // * In general, code needs to handle out-of-bounds indices, so it seems desirable
        //   to explicitly test this.
        let palette = get_random_bytes(&mut rng, 192.min(input_size_in_bytes) * 3);
        let trns = match trns {
            TrnsPresence::Absent => None,
            TrnsPresence::Present => Some(get_random_bytes(&mut rng, 32.min(input_size_in_bytes))),
        };
        let src = get_random_bytes(&mut rng, input_size_in_bytes);

        Self {
            palette,
            trns,
            src,
            src_bit_depth,
        }
    }

    /// Number of bytes that expanding `src` produces: 3 output bytes (RGB) per
    /// sample without tRNS, 4 (RGBA) with it.
    fn output_size_in_bytes(&self) -> usize {
        let output_bytes_per_input_sample = match self.trns {
            None => 3,
            Some(_) => 4,
        };
        let samples_count_per_byte = (8 / self.src_bit_depth) as usize;
        let samples_count = self.src.len() * samples_count_per_byte;
        samples_count * output_bytes_per_input_sample
    }

    /// Builds a `png::Info` carrying this input's PLTE, tRNS, and bit depth.
    fn to_info(&self) -> Info {
        create_info_from_plte_trns_bitdepth(&self.palette, self.trns.as_deref(), self.src_bit_depth)
    }
}
|
||||
|
||||
/// Creates the palette-expansion transform function for `info` under
/// `Transformations::EXPAND`.
#[inline(always)]
fn create_expand_palette_fn(info: &Info) -> TransformFn {
    create_transform_fn(info, Transformations::EXPAND).unwrap()
}
|
||||
|
||||
fn bench_create_fn(c: &mut Criterion, plte_size: usize, trns_size: usize) {
|
||||
let mut group = c.benchmark_group("expand_paletted(ctor)");
|
||||
group.sample_size(1000);
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
let plte = get_random_bytes(&mut rng, 3 * plte_size as usize);
|
||||
let trns = get_random_bytes(&mut rng, trns_size as usize);
|
||||
let info = create_info_from_plte_trns_bitdepth(&plte, Some(&trns), 8);
|
||||
group.bench_with_input(
|
||||
format!("plte={plte_size}/trns={trns_size:?}"),
|
||||
&info,
|
||||
|b, info| {
|
||||
b.iter(|| create_expand_palette_fn(info));
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
/// Benchmarks executing the palette-expansion transform on random input of
/// `input_size_in_bytes` indexed bytes.
fn bench_expand_palette(
    c: &mut Criterion,
    trns: TrnsPresence,
    src_bit_depth: u8,
    input_size_in_bytes: usize,
) {
    let mut group = c.benchmark_group("expand_paletted(exec)");

    let input = Input::new(trns, src_bit_depth, input_size_in_bytes);
    let transform_fn = create_expand_palette_fn(&input.to_info());
    // Throughput is measured in output bytes produced, not input bytes read.
    group.throughput(Throughput::Bytes(input.output_size_in_bytes() as u64));
    group.sample_size(500);
    group.bench_with_input(
        format!("{trns}/src_bits={src_bit_depth}/src_size={input_size_in_bytes}"),
        &input,
        |b, input| {
            // Reuse one output buffer across iterations.
            let mut output = vec![0; input.output_size_in_bytes()];
            let info = input.to_info();
            b.iter(|| {
                transform_fn(input.src.as_slice(), output.as_mut_slice(), &info);
            });
        },
    );
}
|
||||
56
third-party/vendor/png/benches/unfilter.rs
vendored
Normal file
56
third-party/vendor/png/benches/unfilter.rs
vendored
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
//! Usage example:
|
||||
//!
|
||||
//! ```
|
||||
//! $ alias bench="rustup run nightly cargo bench"
|
||||
//! $ bench --bench=unfilter --features=benchmarks,unstable -- --save-baseline my_baseline
|
||||
//! ... tweak something, say the Sub filter ...
|
||||
//! $ bench --bench=unfilter --features=benchmarks,unstable -- filter=Sub --baseline my_baseline
|
||||
//! ```
|
||||
|
||||
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
|
||||
use png::benchable_apis::unfilter;
|
||||
use png::FilterType;
|
||||
use rand::Rng;
|
||||
|
||||
fn unfilter_all(c: &mut Criterion) {
|
||||
let bpps = [1, 2, 3, 4, 6, 8];
|
||||
let filters = [
|
||||
FilterType::Sub,
|
||||
FilterType::Up,
|
||||
FilterType::Avg,
|
||||
FilterType::Paeth,
|
||||
];
|
||||
for &filter in filters.iter() {
|
||||
for &bpp in bpps.iter() {
|
||||
bench_unfilter(c, filter, bpp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches, unfilter_all);
|
||||
criterion_main!(benches);
|
||||
|
||||
/// Benchmarks `unfilter` for one (filter, bytes-per-pixel) combination on a
/// random 4096-pixel row.
fn bench_unfilter(c: &mut Criterion, filter: FilterType, bpp: u8) {
    let mut group = c.benchmark_group("unfilter");

    // Returns `n` bytes of random data drawn from `rng`.
    fn get_random_bytes<R: Rng>(rng: &mut R, n: usize) -> Vec<u8> {
        use rand::Fill;
        let mut result = vec![0u8; n];
        result.as_mut_slice().try_fill(rng).unwrap();
        result
    }
    let mut rng = rand::thread_rng();
    let row_size = 4096 * (bpp as usize);
    // One buffer holding the previous row and the current row back-to-back.
    let two_rows = get_random_bytes(&mut rng, row_size * 2);

    group.throughput(Throughput::Bytes(row_size as u64));
    group.bench_with_input(
        format!("filter={filter:?}/bpp={bpp}"),
        &two_rows,
        |b, two_rows| {
            let (prev_row, curr_row) = two_rows.split_at(row_size);
            // `unfilter` mutates the current row in place, so work on a copy.
            let mut curr_row = curr_row.to_vec();
            b.iter(|| unfilter(filter, bpp, prev_row, curr_row.as_mut_slice()));
        },
    );
}
|
||||
52
third-party/vendor/png/examples/change-png-info.rs
vendored
Normal file
52
third-party/vendor/png/examples/change-png-info.rs
vendored
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
/// Tests "editing"/re-encoding of an image:
|
||||
/// decoding, editing, re-encoding
|
||||
use std::fs::File;
|
||||
use std::io::BufWriter;
|
||||
use std::path::Path;
|
||||
pub type BoxResult<T> = Result<T, Box<dyn std::error::Error + Send + Sync>>;
|
||||
|
||||
fn main() -> BoxResult<()> {
|
||||
// # Decode
|
||||
// Read test image from pngsuite
|
||||
let path_in = Path::new(r"./tests/pngsuite/basi0g01.png");
|
||||
// The decoder is a build for reader and can be used to set various decoding options
|
||||
// via `Transformations`. The default output transformation is `Transformations::IDENTITY`.
|
||||
let decoder = png::Decoder::new(File::open(path_in)?);
|
||||
let mut reader = decoder.read_info()?;
|
||||
// Allocate the output buffer.
|
||||
let png_info = reader.info();
|
||||
let mut buf = vec![0; reader.output_buffer_size()];
|
||||
dbg!(png_info);
|
||||
|
||||
// # Encode
|
||||
let path_out = Path::new(r"./target/test_modified.png");
|
||||
let file = File::create(path_out)?;
|
||||
let ref mut w = BufWriter::new(file);
|
||||
|
||||
// Get defaults for interlaced parameter.
|
||||
let mut info_out = png_info.clone();
|
||||
let info_default = png::Info::default();
|
||||
|
||||
// Edit previous info
|
||||
info_out.interlaced = info_default.interlaced;
|
||||
let mut encoder = png::Encoder::with_info(w, info_out)?;
|
||||
encoder.set_depth(png_info.bit_depth);
|
||||
|
||||
// Edit some attribute
|
||||
encoder.add_text_chunk(
|
||||
"Testing tEXt".to_string(),
|
||||
"This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
|
||||
)?;
|
||||
|
||||
// Save picture with changed info
|
||||
let mut writer = encoder.write_header()?;
|
||||
let mut counter = 0u8;
|
||||
while let Ok(info) = reader.next_frame(&mut buf) {
|
||||
let bytes = &buf[..info.buffer_size()];
|
||||
println!("{} {}", info.buffer_size(), reader.output_buffer_size());
|
||||
writer.write_image_data(&bytes)?;
|
||||
counter += 1;
|
||||
println!("Written frame: {}", counter);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
198
third-party/vendor/png/examples/corpus-bench.rs
vendored
Normal file
198
third-party/vendor/png/examples/corpus-bench.rs
vendored
Normal file
|
|
@ -0,0 +1,198 @@
|
|||
use std::{fs, path::PathBuf};
|
||||
|
||||
use clap::Parser;
|
||||
use png::Decoder;
|
||||
|
||||
/// Encoder compression effort, selectable via `--speed`.
#[derive(clap::ValueEnum, Clone)]
enum Speed {
    Fast,
    Default,
    Best,
}
|
||||
|
||||
/// Row filter strategy passed to the encoder, selectable via `--filter`.
#[derive(clap::ValueEnum, Clone)]
enum Filter {
    None,
    Sub,
    Up,
    Average,
    Paeth,
    Adaptive,
}
|
||||
|
||||
/// Command-line arguments: an optional corpus directory (defaults to `.`),
/// plus encoder speed and filter selection.
#[derive(clap::Parser)]
struct Args {
    directory: Option<PathBuf>,
    #[clap(short, long, value_enum, default_value_t = Speed::Fast)]
    speed: Speed,
    #[clap(short, long, value_enum, default_value_t = Filter::Adaptive)]
    filter: Filter,
}
|
||||
|
||||
/// Re-encodes `image` with the speed/filter settings from `args` and returns
/// the encoded PNG bytes.
///
/// `#[inline(never)]` — presumably kept as a distinct symbol so profilers
/// attribute the work to encoding; TODO confirm intent.
#[inline(never)]
fn run_encode(
    args: &Args,
    dimensions: (u32, u32),
    color_type: png::ColorType,
    bit_depth: png::BitDepth,
    image: &[u8],
) -> Vec<u8> {
    let mut reencoded = Vec::new();
    let mut encoder = png::Encoder::new(&mut reencoded, dimensions.0, dimensions.1);
    encoder.set_color(color_type);
    encoder.set_depth(bit_depth);
    encoder.set_compression(match args.speed {
        Speed::Fast => png::Compression::Fast,
        Speed::Default => png::Compression::Default,
        Speed::Best => png::Compression::Best,
    });
    encoder.set_filter(match args.filter {
        Filter::None => png::FilterType::NoFilter,
        Filter::Sub => png::FilterType::Sub,
        Filter::Up => png::FilterType::Up,
        Filter::Average => png::FilterType::Avg,
        Filter::Paeth => png::FilterType::Paeth,
        // Adaptive also maps to Paeth here; the adaptive behavior itself is
        // enabled via `set_adaptive_filter` below.
        Filter::Adaptive => png::FilterType::Paeth,
    });
    encoder.set_adaptive_filter(match args.filter {
        Filter::Adaptive => png::AdaptiveFilterType::Adaptive,
        _ => png::AdaptiveFilterType::NonAdaptive,
    });
    let mut encoder = encoder.write_header().unwrap();
    encoder.write_image_data(image).unwrap();
    encoder.finish().unwrap();
    reencoded
}
|
||||
|
||||
/// Decodes the first frame of `image` into `output` (sized by the caller).
#[inline(never)]
fn run_decode(image: &[u8], output: &mut [u8]) {
    let mut reader = Decoder::new(image).read_info().unwrap();
    reader.next_frame(output).unwrap();
}
|
||||
|
||||
/// Walks the corpus directory recursively, re-encodes and re-decodes every
/// `.png` file, verifies the round trip, and prints per-directory and total
/// compression-ratio and speed statistics.
fn main() {
    let mut total_uncompressed = 0;
    let mut total_compressed = 0;
    let mut total_pixels = 0;
    let mut total_encode_time = 0;
    let mut total_decode_time = 0;

    let args = Args::parse();

    println!(
        "{:45} Ratio Encode Decode",
        "Directory"
    );
    println!(
        "{:45}------- -------------------- --------------------",
        "---------"
    );

    // Decode-back buffer, reused across all files.
    let mut image2 = Vec::new();

    // Depth-first directory walk via an explicit stack.
    let mut pending = vec![args.directory.clone().unwrap_or(PathBuf::from("."))];
    while let Some(directory) = pending.pop() {
        let mut dir_uncompressed = 0;
        let mut dir_compressed = 0;
        let mut dir_pixels = 0;
        let mut dir_encode_time = 0;
        let mut dir_decode_time = 0;

        for entry in fs::read_dir(&directory).unwrap().flatten() {
            if entry.file_type().unwrap().is_dir() {
                pending.push(entry.path());
                continue;
            }

            match entry.path().extension() {
                Some(st) if st == "png" => {}
                _ => continue,
            }

            // Parse
            let data = fs::read(entry.path()).unwrap();
            let mut decoder = Decoder::new(&*data);
            // Expand indexed images so the re-encode works on plain samples.
            if decoder.read_header_info().ok().map(|h| h.color_type)
                == Some(png::ColorType::Indexed)
            {
                decoder.set_transformations(
                    png::Transformations::EXPAND | png::Transformations::STRIP_16,
                );
            }
            // Undecodable files are silently skipped.
            let mut reader = match decoder.read_info() {
                Ok(reader) => reader,
                Err(_) => continue,
            };
            let mut image = vec![0; reader.output_buffer_size()];
            let info = match reader.next_frame(&mut image) {
                Ok(info) => info,
                Err(_) => continue,
            };
            let (width, height) = (info.width, info.height);
            let bit_depth = info.bit_depth;
            let mut color_type = info.color_type;

            // qoibench expands grayscale to RGB, so we do the same.
            if bit_depth == png::BitDepth::Eight {
                if color_type == png::ColorType::Grayscale {
                    image = image.into_iter().flat_map(|v| [v, v, v, 255]).collect();
                    color_type = png::ColorType::Rgba;
                } else if color_type == png::ColorType::GrayscaleAlpha {
                    image = image
                        .chunks_exact(2)
                        .flat_map(|v| [v[0], v[0], v[0], v[1]])
                        .collect();
                    color_type = png::ColorType::Rgba;
                }
            }

            // Re-encode
            let start = std::time::Instant::now();
            let reencoded = run_encode(&args, (width, height), color_type, bit_depth, &image);
            let elapsed = start.elapsed().as_nanos() as u64;

            // And decode again
            image2.resize(image.len(), 0);
            let start2 = std::time::Instant::now();
            run_decode(&reencoded, &mut image2);
            let elapsed2 = start2.elapsed().as_nanos() as u64;

            // Round trip must be lossless.
            assert_eq!(image, image2);

            // Stats
            dir_uncompressed += image.len();
            dir_compressed += reencoded.len();
            dir_pixels += (width * height) as u64;
            dir_encode_time += elapsed;
            dir_decode_time += elapsed2;
        }
        if dir_uncompressed > 0 {
            // pixels * 1000 / nanoseconds == megapixels per second ("mps").
            println!(
                "{:45}{:6.2}%{:8} mps {:6.2} GiB/s {:8} mps {:6.2} GiB/s",
                directory.display(),
                100.0 * dir_compressed as f64 / dir_uncompressed as f64,
                dir_pixels * 1000 / dir_encode_time,
                dir_uncompressed as f64 / (dir_encode_time as f64 * 1e-9 * (1 << 30) as f64),
                dir_pixels * 1000 / dir_decode_time,
                dir_uncompressed as f64 / (dir_decode_time as f64 * 1e-9 * (1 << 30) as f64)
            );
        }

        total_uncompressed += dir_uncompressed;
        total_compressed += dir_compressed;
        total_pixels += dir_pixels;
        total_encode_time += dir_encode_time;
        total_decode_time += dir_decode_time;
    }

    println!();
    // NOTE(review): if no PNG was processed these divisions are by zero — the
    // tool panics on an empty corpus; confirm whether that is acceptable.
    println!(
        "{:44}{:7.3}%{:8} mps {:6.3} GiB/s {:8} mps {:6.3} GiB/s",
        "Total",
        100.0 * total_compressed as f64 / total_uncompressed as f64,
        total_pixels * 1000 / total_encode_time,
        total_uncompressed as f64 / (total_encode_time as f64 * 1e-9 * (1 << 30) as f64),
        total_pixels * 1000 / total_decode_time,
        total_uncompressed as f64 / (total_decode_time as f64 * 1e-9 * (1 << 30) as f64)
    );
}
|
||||
55
third-party/vendor/png/examples/png-generate.rs
vendored
Normal file
55
third-party/vendor/png/examples/png-generate.rs
vendored
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
// For reading and opening files
|
||||
use png::text_metadata::{ITXtChunk, ZTXtChunk};
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::BufWriter;
|
||||
|
||||
/// Writes a 2x1 RGBA PNG with tEXt/zTXt/iTXt chunks both before and after the
/// image data, to the path given as the first CLI argument.
fn main() {
    let path = env::args()
        .nth(1)
        .expect("Expected a filename to output to.");
    let file = File::create(path).unwrap();
    let w = &mut BufWriter::new(file);

    let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
    encoder.set_color(png::ColorType::Rgba);
    encoder.set_depth(png::BitDepth::Eight);
    // Adding text chunks to the header
    encoder
        .add_text_chunk(
            "Testing tEXt".to_string(),
            "This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
        )
        .unwrap();
    encoder
        .add_ztxt_chunk(
            "Testing zTXt".to_string(),
            "This is a zTXt chunk that is compressed in the png file.".to_string(),
        )
        .unwrap();
    encoder
        .add_itxt_chunk(
            "Testing iTXt".to_string(),
            "iTXt chunks support all of UTF8. Example: हिंदी.".to_string(),
        )
        .unwrap();

    let mut writer = encoder.write_header().unwrap();

    let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
    writer.write_image_data(&data).unwrap(); // Save

    // We can add a tEXt/zTXt/iTXt at any point before the encoder is dropped from scope. These chunks will be at the end of the png file.
    let tail_ztxt_chunk = ZTXtChunk::new(
        "Comment".to_string(),
        "A zTXt chunk after the image data.".to_string(),
    );
    writer.write_text_chunk(&tail_ztxt_chunk).unwrap();

    // The fields of the text chunk are public, so they can be mutated before being written to the file.
    let mut tail_itxt_chunk = ITXtChunk::new("Author".to_string(), "सायंतन खान".to_string());
    tail_itxt_chunk.compressed = true;
    tail_itxt_chunk.language_tag = "hi".to_string();
    tail_itxt_chunk.translated_keyword = "लेखक".to_string();
    writer.write_text_chunk(&tail_itxt_chunk).unwrap();
}
|
||||
381
third-party/vendor/png/examples/pngcheck.rs
vendored
Normal file
381
third-party/vendor/png/examples/pngcheck.rs
vendored
Normal file
|
|
@ -0,0 +1,381 @@
|
|||
#![allow(non_upper_case_globals)]
|
||||
|
||||
extern crate getopts;
|
||||
extern crate glob;
|
||||
extern crate png;
|
||||
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::io::prelude::*;
|
||||
use std::path::Path;
|
||||
|
||||
use getopts::{Matches, Options, ParsingStyle};
|
||||
use term::{color, Attr};
|
||||
|
||||
/// Parses the command-line flags; on no arguments or a parse error, prints
/// usage and exits with status 0.
fn parse_args() -> Matches {
    let args: Vec<String> = env::args().collect();
    let mut opts = Options::new();
    opts.optflag("c", "", "colorize output (for ANSI terminals)")
        .optflag("q", "", "test quietly (output only errors)")
        .optflag(
            "t",
            "",
            "print contents of tEXt/zTXt/iTXt chunks (can be used with -q)",
        )
        .optflag("v", "", "test verbosely (print most chunk data)")
        .parsing_style(ParsingStyle::StopAtFirstFree);
    if args.len() > 1 {
        match opts.parse(&args[1..]) {
            Ok(matches) => return matches,
            Err(err) => println!("{}", err),
        }
    }
    println!("{}", opts.usage("Usage: pngcheck [-cpt] [file ...]"));
    std::process::exit(0);
}
|
||||
|
||||
/// Flags parsed from the command line (see `parse_args`).
#[derive(Clone, Copy)]
struct Config {
    quiet: bool,   // -q: output only errors
    verbose: bool, // -v: print most chunk data
    color: bool,   // -c: colorized ANSI terminal output
    text: bool,    // -t: print tEXt/zTXt/iTXt chunk contents
}
|
||||
|
||||
/// Human-readable label for the interlace flag.
fn display_interlaced(i: bool) -> &'static str {
    match i {
        true => "interlaced",
        false => "non-interlaced",
    }
}
|
||||
|
||||
/// Formats a label such as "8-bit RGB" from the bit depth and color type.
fn display_image_type(bits: u8, color: png::ColorType) -> String {
    use png::ColorType::*;
    format!(
        "{}-bit {}",
        bits,
        match color {
            Grayscale => "grayscale",
            Rgb => "RGB",
            Indexed => "palette",
            GrayscaleAlpha => "grayscale+alpha",
            Rgba => "RGB+alpha",
        }
    )
}
|
||||
/// Channels per pixel after expansion of tRNS transparency: grayscale and
/// palette images gain an alpha channel when a tRNS chunk is present.
fn final_channels(c: png::ColorType, trns: bool) -> u8 {
    use png::ColorType::*;
    match c {
        Grayscale => 1 + u8::from(trns),
        Rgb => 3,
        Indexed => 3 + u8::from(trns),
        GrayscaleAlpha => 2,
        Rgba => 4,
    }
}
|
||||
/// Streams `fname` through `png::StreamingDecoder` in 10 KiB chunks, printing
/// per-chunk diagnostics and an OK/ERROR verdict according to the flags in `c`.
///
/// Returns `Err` only for terminal/IO failures of the checker itself; PNG
/// decoding errors are reported to stdout and yield `Ok(())`.
fn check_image<P: AsRef<Path>>(c: Config, fname: P) -> io::Result<()> {
    // TODO improve performance by resusing allocations from decoder
    use png::Decoded::*;
    let mut t = term::stdout()
        .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "could not open terminal"))?;
    // Fixed 10 KiB read buffer; `buf` is the not-yet-consumed slice of it.
    let data = &mut vec![0; 10 * 1024][..];
    let mut reader = io::BufReader::new(File::open(&fname)?);
    let fname = fname.as_ref().to_string_lossy();
    let n = reader.read(data)?;
    let mut buf = &data[..n];
    let mut pos = 0;
    let mut decoder = png::StreamingDecoder::new();
    // Image data accumulated from decoder events as the stream is parsed.
    let mut width = 0;
    let mut height = 0;
    let mut color = png::ColorType::Grayscale;
    let mut bits = 0;
    let mut trns = false;
    let mut interlaced = false;
    let mut compressed_size = 0;
    let mut n_chunks = 0;
    let mut have_idat = false;
    // Compressed-size / raw-size ratio, computed from the header fields above.
    macro_rules! c_ratio(
        // TODO add palette entries to compressed_size
        () => ({
            compressed_size as f32/(
                height as u64 *
                (width as u64 * final_channels(color, trns) as u64 * bits as u64 + 7)>>3
            ) as f32
        });
    );
    // Prints a decoding error in the style selected by the config flags.
    let display_error = |err| -> Result<_, io::Error> {
        let mut t = term::stdout()
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "could not open terminal"))?;
        if c.verbose {
            if c.color {
                print!(": ");
                t.fg(color::RED)?;
                writeln!(t, "{}", err)?;
                t.attr(Attr::Bold)?;
                write!(t, "ERRORS DETECTED")?;
                t.reset()?;
            } else {
                println!(": {}", err);
                print!("ERRORS DETECTED")
            }
            println!(" in {}", fname);
        } else {
            if !c.quiet {
                if c.color {
                    t.fg(color::RED)?;
                    t.attr(Attr::Bold)?;
                    write!(t, "ERROR")?;
                    t.reset()?;
                    write!(t, ": ")?;
                    t.fg(color::YELLOW)?;
                    writeln!(t, "{}", fname)?;
                    t.reset()?;
                } else {
                    println!("ERROR: {}", fname)
                }
            }
            print!("{}: ", fname);
            if c.color {
                t.fg(color::RED)?;
                writeln!(t, "{}", err)?;
                t.reset()?;
            } else {
                println!("{}", err);
            }
        }
        Ok(())
    };

    if c.verbose {
        print!("File: ");
        if c.color {
            t.attr(Attr::Bold)?;
            write!(t, "{}", fname)?;
            t.reset()?;
        } else {
            print!("{}", fname);
        }
        print!(" ({}) bytes", data.len())
    }
    loop {
        // Refill the buffer when the decoder has consumed everything.
        if buf.is_empty() {
            // circumvent borrow checker
            assert!(!data.is_empty());
            let n = reader.read(data)?;

            // EOF
            if n == 0 {
                println!("ERROR: premature end of file {}", fname);
                break;
            }
            buf = &data[..n];
        }
        match decoder.update(buf, &mut Vec::new()) {
            Ok((_, ImageEnd)) => {
                if !have_idat {
                    // This isn't beautiful. But it works.
                    display_error(png::DecodingError::IoError(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "IDAT chunk missing",
                    )))?;
                    break;
                }
                if !c.verbose && !c.quiet {
                    if c.color {
                        t.fg(color::GREEN)?;
                        t.attr(Attr::Bold)?;
                        write!(t, "OK")?;
                        t.reset()?;
                        write!(t, ": ")?;
                        t.fg(color::YELLOW)?;
                        write!(t, "{}", fname)?;
                        t.reset()?;
                    } else {
                        print!("OK: {}", fname)
                    }
                    println!(
                        " ({}x{}, {}{}, {}, {:.1}%)",
                        width,
                        height,
                        display_image_type(bits, color),
                        (if trns { "+trns" } else { "" }),
                        display_interlaced(interlaced),
                        100.0 * (1.0 - c_ratio!())
                    )
                } else if !c.quiet {
                    println!();
                    if c.color {
                        t.fg(color::GREEN)?;
                        t.attr(Attr::Bold)?;
                        write!(t, "No errors detected ")?;
                        t.reset()?;
                    } else {
                        print!("No errors detected ");
                    }
                    println!(
                        "in {} ({} chunks, {:.1}% compression)",
                        fname,
                        n_chunks,
                        100.0 * (1.0 - c_ratio!()),
                    )
                }
                break;
            }
            Ok((n, res)) => {
                // Advance past the bytes the decoder consumed.
                buf = &buf[n..];
                pos += n;
                match res {
                    Header(w, h, b, c, i) => {
                        width = w;
                        height = h;
                        bits = b as u8;
                        color = c;
                        interlaced = i;
                    }
                    ChunkBegin(len, type_str) => {
                        use png::chunk;
                        n_chunks += 1;
                        if c.verbose {
                            let chunk = type_str;
                            println!();
                            print!(" chunk ");
                            if c.color {
                                t.fg(color::YELLOW)?;
                                write!(t, "{:?}", chunk)?;
                                t.reset()?;
                            } else {
                                print!("{:?}", chunk)
                            }
                            print!(
                                " at offset {:#07x}, length {}",
                                pos - 4, // subtract chunk name length
                                len
                            )
                        }
                        match type_str {
                            chunk::IDAT => {
                                have_idat = true;
                                compressed_size += len
                            }
                            chunk::tRNS => {
                                trns = true;
                            }
                            _ => (),
                        }
                    }
                    ImageData => {
                        //println!("got {} bytes of image data", data.len())
                    }
                    ChunkComplete(_, type_str) if c.verbose => {
                        use png::chunk::*;
                        if type_str == IHDR {
                            println!();
                            print!(
                                " {} x {} image, {}{}, {}",
                                width,
                                height,
                                display_image_type(bits, color),
                                (if trns { "+trns" } else { "" }),
                                display_interlaced(interlaced),
                            );
                        }
                    }
                    AnimationControl(actl) => {
                        println!();
                        print!(" {} frames, {} plays", actl.num_frames, actl.num_plays,);
                    }
                    FrameControl(fctl) => {
                        println!();
                        println!(
                            " sequence #{}, {} x {} pixels @ ({}, {})",
                            fctl.sequence_number,
                            fctl.width,
                            fctl.height,
                            fctl.x_offset,
                            fctl.y_offset,
                            /*fctl.delay_num,
                            fctl.delay_den,
                            fctl.dispose_op,
                            fctl.blend_op,*/
                        );
                        print!(
                            " {}/{} s delay, dispose: {}, blend: {}",
                            fctl.delay_num,
                            if fctl.delay_den == 0 {
                                100
                            } else {
                                fctl.delay_den
                            },
                            fctl.dispose_op,
                            fctl.blend_op,
                        );
                    }
                    _ => (),
                }
                //println!("{} {:?}", n, res)
            }
            Err(err) => {
                let _ = display_error(err);
                break;
            }
        }
    }
    if c.text {
        println!("Parsed tEXt chunks:");
        for text_chunk in &decoder.info().unwrap().uncompressed_latin1_text {
            println!("{:#?}", text_chunk);
        }

        println!("Parsed zTXt chunks:");
        for text_chunk in &decoder.info().unwrap().compressed_latin1_text {
            let mut cloned_text_chunk = text_chunk.clone();
            cloned_text_chunk.decompress_text()?;
            println!("{:#?}", cloned_text_chunk);
        }

        println!("Parsed iTXt chunks:");
        for text_chunk in &decoder.info().unwrap().utf8_text {
            let mut cloned_text_chunk = text_chunk.clone();
            cloned_text_chunk.decompress_text()?;
            println!("{:#?}", cloned_text_chunk);
        }
    }

    Ok(())
}
|
||||
|
||||
/// Parses flags and checks each listed file, expanding `*` glob patterns;
/// exits with status 1 on the first checker failure.
fn main() {
    let m = parse_args();

    let config = Config {
        quiet: m.opt_present("q"),
        verbose: m.opt_present("v"),
        color: m.opt_present("c"),
        text: m.opt_present("t"),
    };

    for file in m.free {
        let result = if file.contains('*') {
            glob::glob(&file)
                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
                .and_then(|mut glob| {
                    glob.try_for_each(|entry| {
                        entry
                            .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
                            .and_then(|file| check_image(config, file))
                    })
                })
        } else {
            check_image(config, &file)
        };

        result.unwrap_or_else(|err| {
            println!("{}: {}", file, err);
            std::process::exit(1)
        });
    }
}
|
||||
198
third-party/vendor/png/examples/show.rs
vendored
Normal file
198
third-party/vendor/png/examples/show.rs
vendored
Normal file
|
|
@ -0,0 +1,198 @@
|
|||
use glium::{
|
||||
backend::glutin::Display,
|
||||
glutin::{
|
||||
self, dpi,
|
||||
event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
|
||||
event_loop::ControlFlow,
|
||||
},
|
||||
texture::{ClientFormat, RawImage2d},
|
||||
BlitTarget, Rect, Surface,
|
||||
};
|
||||
use std::{borrow::Cow, env, fs::File, io, path};
|
||||
|
||||
/// Load the image using `png`.
///
/// Expands grayscale (±alpha) samples to RGB(A) so the result is always one
/// of glium's `U8U8U8`/`U8U8U8U8` client formats.
fn load_image(path: &path::PathBuf) -> io::Result<RawImage2d<'static, u8>> {
    use png::ColorType::*;
    let mut decoder = png::Decoder::new(File::open(path)?);
    decoder.set_transformations(png::Transformations::normalize_to_color8());
    let mut reader = decoder.read_info()?;
    let mut img_data = vec![0; reader.output_buffer_size()];
    let info = reader.next_frame(&mut img_data)?;

    let (data, format) = match info.color_type {
        Rgb => (img_data, ClientFormat::U8U8U8),
        Rgba => (img_data, ClientFormat::U8U8U8U8),
        Grayscale => (
            {
                // Replicate each gray sample into R, G and B.
                let mut vec = Vec::with_capacity(img_data.len() * 3);
                for g in img_data {
                    vec.extend([g, g, g].iter().cloned())
                }
                vec
            },
            ClientFormat::U8U8U8,
        ),
        GrayscaleAlpha => (
            {
                // Replicate gray into R/G/B and keep the alpha byte.
                let mut vec = Vec::with_capacity(img_data.len() * 3);
                for ga in img_data.chunks(2) {
                    let g = ga[0];
                    let a = ga[1];
                    vec.extend([g, g, g, a].iter().cloned())
                }
                vec
            },
            ClientFormat::U8U8U8U8,
        ),
        // Presumably ruled out by `normalize_to_color8` above — TODO confirm.
        _ => unreachable!("uncovered color type"),
    };

    Ok(RawImage2d {
        data: Cow::Owned(data),
        width: info.width,
        height: info.height,
        format,
    })
}
|
||||
|
||||
/// Displays the first image and steps through the remaining `files` on the
/// Right-arrow key; Escape or closing the window ends the loop. Note that
/// `event_loop.run` diverges, so this never returns normally.
fn main_loop(files: Vec<path::PathBuf>) -> io::Result<()> {
    let mut files = files.into_iter();
    let image = load_image(&files.next().unwrap())?;

    let event_loop = glutin::event_loop::EventLoop::new();
    let window_builder = glutin::window::WindowBuilder::new().with_title("Show Example");
    let context_builder = glutin::ContextBuilder::new().with_vsync(true);
    let display = glium::Display::new(window_builder, context_builder, &event_loop)
        .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
    resize_window(&display, &image);
    let mut texture = glium::Texture2d::new(&display, image).unwrap();
    draw(&display, &texture);

    event_loop.run(move |event, _, control_flow| match event {
        Event::WindowEvent {
            event: WindowEvent::CloseRequested,
            ..
        } => exit(control_flow),
        Event::WindowEvent {
            event:
                WindowEvent::KeyboardInput {
                    input:
                        KeyboardInput {
                            state: ElementState::Pressed,
                            virtual_keycode: code,
                            ..
                        },
                    ..
                },
            ..
        } => match code {
            Some(VirtualKeyCode::Escape) => exit(control_flow),
            // Right arrow: advance to the next file, exiting when exhausted.
            Some(VirtualKeyCode::Right) => match &files.next() {
                Some(path) => {
                    match load_image(path) {
                        Ok(image) => {
                            resize_window(&display, &image);
                            texture = glium::Texture2d::new(&display, image).unwrap();
                            draw(&display, &texture);
                        }
                        Err(err) => {
                            println!("Error: {}", err);
                            exit(control_flow);
                        }
                    };
                }
                None => exit(control_flow),
            },
            _ => {}
        },
        Event::RedrawRequested(_) => draw(&display, &texture),
        _ => {}
    });
}
|
||||
|
||||
fn draw(display: &glium::Display, texture: &glium::Texture2d) {
|
||||
let frame = display.draw();
|
||||
fill_v_flipped(
|
||||
&texture.as_surface(),
|
||||
&frame,
|
||||
glium::uniforms::MagnifySamplerFilter::Linear,
|
||||
);
|
||||
frame.finish().unwrap();
|
||||
}
|
||||
|
||||
/// Ask the glutin event loop to terminate after the current iteration.
fn exit(control_flow: &mut ControlFlow) {
    *control_flow = ControlFlow::Exit;
}
|
||||
|
||||
fn fill_v_flipped<S1, S2>(src: &S1, target: &S2, filter: glium::uniforms::MagnifySamplerFilter)
|
||||
where
|
||||
S1: Surface,
|
||||
S2: Surface,
|
||||
{
|
||||
let src_dim = src.get_dimensions();
|
||||
let src_rect = Rect {
|
||||
left: 0,
|
||||
bottom: 0,
|
||||
width: src_dim.0,
|
||||
height: src_dim.1,
|
||||
};
|
||||
let target_dim = target.get_dimensions();
|
||||
let target_rect = BlitTarget {
|
||||
left: 0,
|
||||
bottom: target_dim.1,
|
||||
width: target_dim.0 as i32,
|
||||
height: -(target_dim.1 as i32),
|
||||
};
|
||||
src.blit_color(&src_rect, target, &target_rect, filter);
|
||||
}
|
||||
|
||||
fn resize_window(display: &Display, image: &RawImage2d<'static, u8>) {
|
||||
let mut width = image.width;
|
||||
let mut height = image.height;
|
||||
if width < 50 && height < 50 {
|
||||
width *= 10;
|
||||
height *= 10;
|
||||
}
|
||||
display
|
||||
.gl_window()
|
||||
.window()
|
||||
.set_inner_size(dpi::LogicalSize::new(f64::from(width), f64::from(height)));
|
||||
}
|
||||
|
||||
/// Entry point: collect file arguments (expanding `*` globs) and run the
/// viewer loop over them.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        println!("Usage: show files [...]");
    } else {
        let mut files = vec![];
        for file in args.iter().skip(1) {
            // Each argument either contains a `*` and is expanded through
            // `glob`, or is taken verbatim. Both branches yield an
            // `io::Result<()>` so one `match` handles the error reporting.
            match if file.contains('*') {
                // Immediately-invoked closure so `?` can be used for both
                // glob failure modes.
                (|| -> io::Result<_> {
                    for entry in glob::glob(file)
                        .map_err(|err| io::Error::new(io::ErrorKind::Other, err.msg))?
                    {
                        files.push(
                            entry
                                .map_err(|_| io::Error::new(io::ErrorKind::Other, "glob error"))?,
                        )
                    }
                    Ok(())
                })()
            } else {
                files.push(path::PathBuf::from(file));
                Ok(())
            } {
                Ok(_) => (),
                Err(err) => {
                    // Stop collecting on the first bad argument; files
                    // gathered so far are still shown below.
                    println!("{}: {}", file, err);
                    break;
                }
            }
        }
        // "tests/pngsuite/pngsuite.png"
        match main_loop(files) {
            Ok(_) => (),
            Err(err) => println!("Error: {}", err),
        }
    }
}
|
||||
384
third-party/vendor/png/src/adam7.rs
vendored
Normal file
384
third-party/vendor/png/src/adam7.rs
vendored
Normal file
|
|
@ -0,0 +1,384 @@
|
|||
//! Utility functions
|
||||
use std::iter::StepBy;
|
||||
use std::ops::Range;
|
||||
|
||||
/// This iterator iterates over the different passes of an image Adam7 encoded
|
||||
/// PNG image
|
||||
/// The pattern is:
|
||||
/// 16462646
|
||||
/// 77777777
|
||||
/// 56565656
|
||||
/// 77777777
|
||||
/// 36463646
|
||||
/// 77777777
|
||||
/// 56565656
|
||||
/// 77777777
|
||||
///
|
||||
#[derive(Clone)]
pub(crate) struct Adam7Iterator {
    /// Next line index within the current pass.
    line: u32,
    /// Number of lines the current pass contains for this image.
    lines: u32,
    /// Pixels per line in the current pass.
    line_width: u32,
    /// Current Adam7 pass, 1..=7.
    current_pass: u8,
    /// Full image width in pixels.
    width: u32,
    /// Full image height in pixels.
    height: u32,
}
|
||||
|
||||
impl Adam7Iterator {
    /// Creates an iterator over the Adam7 passes of a `width` x `height`
    /// image, starting at pass 1.
    pub fn new(width: u32, height: u32) -> Adam7Iterator {
        let mut this = Adam7Iterator {
            line: 0,
            lines: 0,
            line_width: 0,
            current_pass: 1,
            width,
            height,
        };
        this.init_pass();
        this
    }

    /// Calculates the bounds of the current pass
    fn init_pass(&mut self) {
        let w = f64::from(self.width);
        let h = f64::from(self.height);
        // Per pass: fraction of columns/rows it covers. The subtracted
        // offsets account for where the pass's first pixel sits inside the
        // 8x8 Adam7 tile; `ceil` rounds partial tiles up so edge pixels are
        // not lost.
        let (line_width, lines) = match self.current_pass {
            1 => (w / 8.0, h / 8.0),
            2 => ((w - 4.0) / 8.0, h / 8.0),
            3 => (w / 4.0, (h - 4.0) / 8.0),
            4 => ((w - 2.0) / 4.0, h / 4.0),
            5 => (w / 2.0, (h - 2.0) / 4.0),
            6 => ((w - 1.0) / 2.0, h / 2.0),
            7 => (w, (h - 1.0) / 2.0),
            _ => unreachable!(),
        };
        // NOTE: a negative intermediate (image smaller than the offset)
        // ceils to <= 0 and casts to 0 lines/width, i.e. an empty pass.
        self.line_width = line_width.ceil() as u32;
        self.lines = lines.ceil() as u32;
        self.line = 0;
    }

    /// The current pass#.
    pub fn current_pass(&self) -> u8 {
        self.current_pass
    }
}
|
||||
|
||||
/// Iterates over the (passes, lines, widths)
|
||||
impl Iterator for Adam7Iterator {
|
||||
type Item = (u8, u32, u32);
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.line < self.lines && self.line_width > 0 {
|
||||
let this_line = self.line;
|
||||
self.line += 1;
|
||||
Some((self.current_pass, this_line, self.line_width))
|
||||
} else if self.current_pass < 7 {
|
||||
self.current_pass += 1;
|
||||
self.init_pass();
|
||||
self.next()
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterates over the samples packed into `scanline` at `bits_pp` bits per
/// sample (1, 2 or 4), yielding each sample widened into its own `u8`.
fn subbyte_pixels(scanline: &[u8], bits_pp: usize) -> impl Iterator<Item = u8> + '_ {
    (0..scanline.len() * 8)
        .step_by(bits_pp)
        .map(move |bit_idx| {
            // Samples are packed starting at the high-order bits, so shift
            // the sample down to the low end before masking.
            let shift = 8 - bit_idx % 8 - bits_pp;
            let mask = match bits_pp {
                1 => 1,
                2 => 3,
                4 => 15,
                _ => unreachable!(),
            };
            (scanline[bit_idx / 8] >> shift) & mask
        })
}
|
||||
|
||||
/// Given pass, image width, and line number, produce an iterator of bit positions of pixels to copy
/// from the input scanline to the image buffer.
fn expand_adam7_bits(
    pass: u8,
    width: usize,
    line_no: usize,
    bits_pp: usize,
) -> StepBy<Range<usize>> {
    // For each pass: (row stride, first row, column stride, first column)
    // of the pixels this pass contributes, in full-image coordinates.
    let (line_mul, line_off, samp_mul, samp_off) = match pass {
        1 => (8, 0, 8, 0),
        2 => (8, 0, 8, 4),
        3 => (8, 4, 4, 0),
        4 => (4, 0, 4, 2),
        5 => (4, 2, 2, 0),
        6 => (2, 0, 2, 1),
        7 => (2, 1, 1, 0),
        _ => panic!("Adam7 pass out of range: {}", pass),
    };

    // Row of the full (progressive) image this pass line lands on.
    let prog_line = line_no * line_mul + line_off;
    // Each stored scanline is padded up to a whole number of bytes.
    let line_width = (width * bits_pp + 7) & !7;
    let line_start = prog_line * line_width;
    let first = line_start + samp_off * bits_pp;
    let last = line_start + width * bits_pp;

    (first..last).step_by(samp_mul * bits_pp)
}
|
||||
|
||||
/// Expands an Adam 7 pass
///
/// Copies the pixels of one interlaced scanline (`scanline`, pass `pass`,
/// pass-local line `line_no`) into their final positions in the
/// progressive image buffer `img`.
pub fn expand_pass(
    img: &mut [u8],
    width: u32,
    scanline: &[u8],
    pass: u8,
    line_no: u32,
    bits_pp: u8,
) {
    let width = width as usize;
    let line_no = line_no as usize;
    let bits_pp = bits_pp as usize;

    // pass is out of range but don't blow up
    if pass == 0 || pass > 7 {
        return;
    }

    // Bit offsets in `img` that this scanline's pixels map to.
    let bit_indices = expand_adam7_bits(pass, width, line_no, bits_pp);

    if bits_pp < 8 {
        // Sub-byte samples: OR each sample into the high-to-low bit slot.
        // NOTE(review): relies on the destination bits being
        // zero-initialized; `|=` never clears previously written bits.
        for (pos, px) in bit_indices.zip(subbyte_pixels(scanline, bits_pp)) {
            let rem = 8 - pos % 8 - bits_pp;
            img[pos / 8] |= px << rem as u8;
        }
    } else {
        // Whole-byte pixels: copy `bytes_pp` bytes per pixel verbatim.
        let bytes_pp = bits_pp / 8;

        for (bitpos, px) in bit_indices.zip(scanline.chunks(bytes_pp)) {
            for (offset, val) in px.iter().enumerate() {
                img[bitpos / 8 + offset] = *val;
            }
        }
    }
}
|
||||
|
||||
#[test]
fn test_adam7() {
    /*
        1646
        7777
        5656
        7777
    */
    // A 4x4 image: passes 2 and 3 are empty, which the iterator must skip.
    let it = Adam7Iterator::new(4, 4);
    let passes: Vec<_> = it.collect();
    assert_eq!(
        &*passes,
        &[
            (1, 0, 1),
            (4, 0, 1),
            (5, 0, 2),
            (6, 0, 2),
            (6, 1, 2),
            (7, 0, 4),
            (7, 1, 4)
        ]
    );
}
|
||||
|
||||
#[test]
fn test_subbyte_pixels() {
    // Two bytes of alternating bits at 1 bpp must yield 16 alternating
    // single-bit samples.
    let scanline = &[0b10101010, 0b10101010];

    let pixels = subbyte_pixels(scanline, 1).collect::<Vec<_>>();
    assert_eq!(pixels.len(), 16);
    assert_eq!(pixels, [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]);
}
|
||||
|
||||
#[test]
fn test_expand_adam7_bits() {
    let width = 32;
    let bits_pp = 1;

    // Assert that `expand_adam7_bits(pass, ..)` for `line_no` produces
    // `count` indices starting at `offset`, spaced `step` apart.
    let check = |pass: u8, line_no: usize, offset: usize, step: usize, count: usize| {
        let actual: Vec<_> = expand_adam7_bits(pass, width, line_no, bits_pp).collect();
        let wanted: Vec<_> = (0..count).map(|i| step * i + offset).collect();
        assert_eq!(actual, wanted, "pass: {} line_no: {}", pass, line_no);
    };

    // Passes 1-3 each contribute 8 lines of a 64-line-tall tile column.
    for line_no in 0..8 {
        let base = 8 * line_no * width;
        check(1, line_no, base, 8, 4);
        check(2, line_no, base + 4, 8, 4);
        check(3, line_no, (8 * line_no + 4) * width, 4, 8);
    }

    // Passes 4-5 contribute 16 lines.
    for line_no in 0..16 {
        check(4, line_no, 4 * line_no * width + 2, 4, 8);
        check(5, line_no, (4 * line_no + 2) * width, 2, 16);
    }

    // Passes 6-7 contribute 32 lines.
    for line_no in 0..32 {
        check(6, line_no, 2 * line_no * width + 1, 2, 16);
        check(7, line_no, (2 * line_no + 1) * width, 1, 32);
    }
}
|
||||
|
||||
#[test]
fn test_expand_pass_subbyte() {
    let width = 8;
    let bits_pp = 1;
    let mut img = [0u8; 8];

    // Each step feeds one scanline into `expand_pass` and lists the full
    // expected image buffer afterwards; the steps deliberately build on
    // each other, assembling an all-ones 8x8 1-bpp image pass by pass.
    let steps: &[(&[u8], u8, u32, [u8; 8])] = &[
        (&[0b10000000], 1, 0, [0b10000000, 0, 0, 0, 0, 0, 0, 0]),
        (&[0b10000000], 2, 0, [0b10001000, 0, 0, 0, 0, 0, 0, 0]),
        (&[0b11000000], 3, 0, [0b10001000, 0, 0, 0, 0b10001000, 0, 0, 0]),
        (&[0b11000000], 4, 0, [0b10101010, 0, 0, 0, 0b10001000, 0, 0, 0]),
        (&[0b11000000], 4, 1, [0b10101010, 0, 0, 0, 0b10101010, 0, 0, 0]),
        (
            &[0b11110000],
            5,
            0,
            [0b10101010, 0, 0b10101010, 0, 0b10101010, 0, 0, 0],
        ),
        (
            &[0b11110000],
            5,
            1,
            [0b10101010, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0],
        ),
        (
            &[0b11110000],
            6,
            0,
            [0b11111111, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0],
        ),
        (
            &[0b11110000],
            6,
            1,
            [0b11111111, 0, 0b11111111, 0, 0b10101010, 0, 0b10101010, 0],
        ),
        (
            &[0b11110000],
            6,
            2,
            [0b11111111, 0, 0b11111111, 0, 0b11111111, 0, 0b10101010, 0],
        ),
        (
            &[0b11110000],
            6,
            3,
            [0b11111111, 0, 0b11111111, 0, 0b11111111, 0, 0b11111111, 0],
        ),
        (
            &[0b11111111],
            7,
            0,
            [
                0b11111111, 0b11111111, 0b11111111, 0, 0b11111111, 0, 0b11111111, 0,
            ],
        ),
        (
            &[0b11111111],
            7,
            1,
            [
                0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0, 0b11111111, 0,
            ],
        ),
        (
            &[0b11111111],
            7,
            2,
            [
                0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111, 0b11111111,
                0,
            ],
        ),
        (&[0b11111111], 7, 3, [0b11111111; 8]),
    ];

    for &(scanline, pass, line_no, expected) in steps {
        expand_pass(&mut img, width, scanline, pass, line_no, bits_pp);
        assert_eq!(img, expected, "pass: {} line_no: {}", pass, line_no);
    }
}
|
||||
29
third-party/vendor/png/src/benchable_apis.rs
vendored
Normal file
29
third-party/vendor/png/src/benchable_apis.rs
vendored
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
//! Development-time-only helper module for exporting private APIs so that they can be benchmarked.
|
||||
//! This module is gated behind the "benchmarks" feature.
|
||||
|
||||
use crate::common::BytesPerPixel;
|
||||
use crate::filter::FilterType;
|
||||
use crate::{BitDepth, ColorType, Info};
|
||||
|
||||
/// Re-exporting `unfilter` to make it easier to benchmark, despite some items being only
/// `pub(crate)`: `fn unfilter`, `enum BytesPerPixel`.
///
/// Takes the bytes-per-pixel as a plain `u8` so benchmarks don't need
/// access to the crate-private `BytesPerPixel` enum.
pub fn unfilter(filter: FilterType, tbpp: u8, previous: &[u8], current: &mut [u8]) {
    // NOTE(review): `from_usize` presumably rejects/panics on byte counts
    // other than the enum's 1-4, 6, 8 — confirm in common.rs.
    let tbpp = BytesPerPixel::from_usize(tbpp as usize);
    crate::filter::unfilter(filter, tbpp, previous, current)
}
|
||||
|
||||
pub use crate::decoder::transform::{create_transform_fn, TransformFn};
|
||||
|
||||
/// Builds an `Info` describing an indexed-color image from raw `PLTE`
/// bytes, optional `tRNS` bytes, and a bit depth — a benchmark-only helper
/// for exercising palette expansion.
///
/// # Panics
/// Panics if `bit_depth` is not one of 1, 2, 4, 8, 16
/// (`BitDepth::from_u8` returns `None` for anything else).
pub fn create_info_from_plte_trns_bitdepth<'a>(
    plte: &'a [u8],
    trns: Option<&'a [u8]>,
    bit_depth: u8,
) -> Info<'a> {
    Info {
        color_type: ColorType::Indexed,
        bit_depth: BitDepth::from_u8(bit_depth).unwrap(),
        palette: Some(plte.into()),
        trns: trns.map(Into::into),
        ..Info::default()
    }
}
|
||||
98
third-party/vendor/png/src/chunk.rs
vendored
Normal file
98
third-party/vendor/png/src/chunk.rs
vendored
Normal file
|
|
@ -0,0 +1,98 @@
|
|||
//! Chunk types and functions
|
||||
#![allow(dead_code)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
use core::fmt;
|
||||
|
||||
/// A four-byte PNG chunk type code (e.g. `IHDR`). The ASCII case bit
/// (0x20) of each byte encodes the chunk's property flags — see the
/// predicate functions below.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ChunkType(pub [u8; 4]);
|
||||
|
||||
// -- Critical chunks --
|
||||
|
||||
/// Image header
|
||||
pub const IHDR: ChunkType = ChunkType(*b"IHDR");
|
||||
/// Palette
|
||||
pub const PLTE: ChunkType = ChunkType(*b"PLTE");
|
||||
/// Image data
|
||||
pub const IDAT: ChunkType = ChunkType(*b"IDAT");
|
||||
/// Image trailer
|
||||
pub const IEND: ChunkType = ChunkType(*b"IEND");
|
||||
|
||||
// -- Ancillary chunks --
|
||||
|
||||
/// Transparency
|
||||
pub const tRNS: ChunkType = ChunkType(*b"tRNS");
|
||||
/// Background colour
|
||||
pub const bKGD: ChunkType = ChunkType(*b"bKGD");
|
||||
/// Image last-modification time
|
||||
pub const tIME: ChunkType = ChunkType(*b"tIME");
|
||||
/// Physical pixel dimensions
|
||||
pub const pHYs: ChunkType = ChunkType(*b"pHYs");
|
||||
/// Source system's pixel chromaticities
|
||||
pub const cHRM: ChunkType = ChunkType(*b"cHRM");
|
||||
/// Source system's gamma value
|
||||
pub const gAMA: ChunkType = ChunkType(*b"gAMA");
|
||||
/// sRGB color space chunk
|
||||
pub const sRGB: ChunkType = ChunkType(*b"sRGB");
|
||||
/// ICC profile chunk
|
||||
pub const iCCP: ChunkType = ChunkType(*b"iCCP");
|
||||
/// Latin-1 uncompressed textual data
|
||||
pub const tEXt: ChunkType = ChunkType(*b"tEXt");
|
||||
/// Latin-1 compressed textual data
|
||||
pub const zTXt: ChunkType = ChunkType(*b"zTXt");
|
||||
/// UTF-8 textual data
|
||||
pub const iTXt: ChunkType = ChunkType(*b"iTXt");
|
||||
|
||||
// -- Extension chunks --
|
||||
|
||||
/// Animation control
|
||||
pub const acTL: ChunkType = ChunkType(*b"acTL");
|
||||
/// Frame control
|
||||
pub const fcTL: ChunkType = ChunkType(*b"fcTL");
|
||||
/// Frame data
|
||||
pub const fdAT: ChunkType = ChunkType(*b"fdAT");
|
||||
|
||||
// -- Chunk type determination --
|
||||
|
||||
/// Returns true if the chunk is critical.
pub fn is_critical(ChunkType(type_): ChunkType) -> bool {
    // Bit 5 of the first byte is the ASCII lowercase bit: uppercase
    // (bit clear) marks a critical chunk.
    type_[0] & 32 == 0
}
|
||||
|
||||
/// Returns true if the chunk is private.
pub fn is_private(ChunkType(type_): ChunkType) -> bool {
    // Lowercase second byte (bit 5 set) marks a private chunk.
    type_[1] & 32 != 0
}
|
||||
|
||||
/// Checks whether the reserved bit of the chunk name is set.
/// If it is set the chunk name is invalid.
pub fn reserved_set(ChunkType(type_): ChunkType) -> bool {
    // The third byte must be uppercase in valid chunk names.
    type_[2] & 32 != 0
}
|
||||
|
||||
/// Returns true if the chunk is safe to copy if unknown.
pub fn safe_to_copy(ChunkType(type_): ChunkType) -> bool {
    // Lowercase fourth byte (bit 5 set) marks a safe-to-copy chunk.
    type_[3] & 32 != 0
}
|
||||
|
||||
impl fmt::Debug for ChunkType {
    /// Formats the chunk as its escaped four-character name plus the four
    /// decoded property flags.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Helper that prints the raw bytes as escaped characters instead
        // of a `[u8; 4]` array.
        struct DebugType([u8; 4]);

        impl fmt::Debug for DebugType {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                for &c in &self.0[..] {
                    write!(f, "{}", char::from(c).escape_debug())?;
                }
                Ok(())
            }
        }

        f.debug_struct("ChunkType")
            .field("type", &DebugType(self.0))
            .field("critical", &is_critical(*self))
            .field("private", &is_private(*self))
            .field("reserved", &reserved_set(*self))
            .field("safecopy", &safe_to_copy(*self))
            .finish()
    }
}
|
||||
812
third-party/vendor/png/src/common.rs
vendored
Normal file
812
third-party/vendor/png/src/common.rs
vendored
Normal file
|
|
@ -0,0 +1,812 @@
|
|||
//! Common types shared between the encoder and decoder
|
||||
use crate::text_metadata::{EncodableTextChunk, ITXtChunk, TEXtChunk, ZTXtChunk};
|
||||
use crate::{chunk, encoder};
|
||||
use io::Write;
|
||||
use std::{borrow::Cow, convert::TryFrom, fmt, io};
|
||||
|
||||
/// Describes how a pixel is encoded.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
pub enum ColorType {
|
||||
/// 1 grayscale sample.
|
||||
Grayscale = 0,
|
||||
/// 1 red sample, 1 green sample, 1 blue sample.
|
||||
Rgb = 2,
|
||||
/// 1 sample for the palette index.
|
||||
Indexed = 3,
|
||||
/// 1 grayscale sample, then 1 alpha sample.
|
||||
GrayscaleAlpha = 4,
|
||||
/// 1 red sample, 1 green sample, 1 blue sample, and finally, 1 alpha sample.
|
||||
Rgba = 6,
|
||||
}
|
||||
|
||||
impl ColorType {
    /// Returns the number of samples used per pixel encoded in this way.
    pub fn samples(self) -> usize {
        self.samples_u8().into()
    }

    // Samples per pixel as a `u8`; indexed pixels are a single palette
    // index sample.
    pub(crate) fn samples_u8(self) -> u8 {
        use self::ColorType::*;
        match self {
            Grayscale | Indexed => 1,
            Rgb => 3,
            GrayscaleAlpha => 2,
            Rgba => 4,
        }
    }

    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    pub fn from_u8(n: u8) -> Option<ColorType> {
        match n {
            0 => Some(ColorType::Grayscale),
            2 => Some(ColorType::Rgb),
            3 => Some(ColorType::Indexed),
            4 => Some(ColorType::GrayscaleAlpha),
            6 => Some(ColorType::Rgba),
            _ => None,
        }
    }

    // Byte length of one raw scanline (including the leading filter-type
    // byte, hence `1 +`), or `None` if it does not fit in `usize`.
    pub(crate) fn checked_raw_row_length(self, depth: BitDepth, width: u32) -> Option<usize> {
        // No overflow can occur in 64 bits, we multiply 32-bit with 5 more bits.
        let bits = u64::from(width) * u64::from(self.samples_u8()) * u64::from(depth.into_u8());
        TryFrom::try_from(1 + (bits + 7) / 8).ok()
    }

    // Like `checked_raw_row_length` but unchecked; the `1 +` is the
    // per-row filter-type byte.
    pub(crate) fn raw_row_length_from_width(self, depth: BitDepth, width: u32) -> usize {
        let samples = width as usize * self.samples();
        1 + match depth {
            BitDepth::Sixteen => samples * 2,
            BitDepth::Eight => samples,
            subbyte => {
                // Sub-byte depths pack several samples per byte; round the
                // final partial byte up.
                let samples_per_byte = 8 / subbyte as usize;
                let whole = samples / samples_per_byte;
                let fract = usize::from(samples % samples_per_byte > 0);
                whole + fract
            }
        }
    }

    pub(crate) fn is_combination_invalid(self, bit_depth: BitDepth) -> bool {
        // Section 11.2.2 of the PNG standard disallows several combinations
        // of bit depth and color type
        ((bit_depth == BitDepth::One || bit_depth == BitDepth::Two || bit_depth == BitDepth::Four)
            && (self == ColorType::Rgb
                || self == ColorType::GrayscaleAlpha
                || self == ColorType::Rgba))
            || (bit_depth == BitDepth::Sixteen && self == ColorType::Indexed)
    }
}
|
||||
|
||||
/// Bit depth of the PNG file.
|
||||
/// Specifies the number of bits per sample.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
pub enum BitDepth {
|
||||
One = 1,
|
||||
Two = 2,
|
||||
Four = 4,
|
||||
Eight = 8,
|
||||
Sixteen = 16,
|
||||
}
|
||||
|
||||
/// Internal count of bytes per pixel.
|
||||
/// This is used for filtering which never uses sub-byte units. This essentially reduces the number
|
||||
/// of possible byte chunk lengths to a very small set of values appropriate to be defined as an
|
||||
/// enum.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
#[repr(u8)]
|
||||
pub(crate) enum BytesPerPixel {
|
||||
One = 1,
|
||||
Two = 2,
|
||||
Three = 3,
|
||||
Four = 4,
|
||||
Six = 6,
|
||||
Eight = 8,
|
||||
}
|
||||
|
||||
impl BitDepth {
    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    /// Only the PNG-legal depths 1, 2, 4, 8 and 16 map to a variant.
    pub fn from_u8(n: u8) -> Option<BitDepth> {
        match n {
            1 => Some(BitDepth::One),
            2 => Some(BitDepth::Two),
            4 => Some(BitDepth::Four),
            8 => Some(BitDepth::Eight),
            16 => Some(BitDepth::Sixteen),
            _ => None,
        }
    }

    // The discriminant IS the bit count, so a plain cast suffices.
    pub(crate) fn into_u8(self) -> u8 {
        self as u8
    }
}
|
||||
|
||||
/// Pixel dimensions information
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct PixelDimensions {
|
||||
/// Pixels per unit, X axis
|
||||
pub xppu: u32,
|
||||
/// Pixels per unit, Y axis
|
||||
pub yppu: u32,
|
||||
/// Either *Meter* or *Unspecified*
|
||||
pub unit: Unit,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
/// Physical unit of the pixel dimensions
|
||||
pub enum Unit {
|
||||
Unspecified = 0,
|
||||
Meter = 1,
|
||||
}
|
||||
|
||||
impl Unit {
    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    /// Valid pHYs unit values are only 0 (unspecified) and 1 (meter).
    pub fn from_u8(n: u8) -> Option<Unit> {
        match n {
            0 => Some(Unit::Unspecified),
            1 => Some(Unit::Meter),
            _ => None,
        }
    }
}
|
||||
|
||||
/// How to reset buffer of an animated png (APNG) at the end of a frame.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
pub enum DisposeOp {
|
||||
/// Leave the buffer unchanged.
|
||||
None = 0,
|
||||
/// Clear buffer with the background color.
|
||||
Background = 1,
|
||||
/// Reset the buffer to the state before the current frame.
|
||||
Previous = 2,
|
||||
}
|
||||
|
||||
impl DisposeOp {
    /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now.
    /// Valid APNG dispose_op values are 0..=2.
    pub fn from_u8(n: u8) -> Option<DisposeOp> {
        match n {
            0 => Some(DisposeOp::None),
            1 => Some(DisposeOp::Background),
            2 => Some(DisposeOp::Previous),
            _ => None,
        }
    }
}
|
||||
|
||||
impl fmt::Display for DisposeOp {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let name = match *self {
|
||||
DisposeOp::None => "DISPOSE_OP_NONE",
|
||||
DisposeOp::Background => "DISPOSE_OP_BACKGROUND",
|
||||
DisposeOp::Previous => "DISPOSE_OP_PREVIOUS",
|
||||
};
|
||||
write!(f, "{}", name)
|
||||
}
|
||||
}
|
||||
|
||||
/// How pixels are written into the buffer.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
pub enum BlendOp {
|
||||
/// Pixels overwrite the value at their position.
|
||||
Source = 0,
|
||||
/// The new pixels are blended into the current state based on alpha.
|
||||
Over = 1,
|
||||
}
|
||||
|
||||
impl BlendOp {
    /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now.
    /// Valid APNG blend_op values are 0..=1.
    pub fn from_u8(n: u8) -> Option<BlendOp> {
        match n {
            0 => Some(BlendOp::Source),
            1 => Some(BlendOp::Over),
            _ => None,
        }
    }
}
|
||||
|
||||
impl fmt::Display for BlendOp {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let name = match *self {
|
||||
BlendOp::Source => "BLEND_OP_SOURCE",
|
||||
BlendOp::Over => "BLEND_OP_OVER",
|
||||
};
|
||||
write!(f, "{}", name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Frame control information
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct FrameControl {
|
||||
/// Sequence number of the animation chunk, starting from 0
|
||||
pub sequence_number: u32,
|
||||
/// Width of the following frame
|
||||
pub width: u32,
|
||||
/// Height of the following frame
|
||||
pub height: u32,
|
||||
/// X position at which to render the following frame
|
||||
pub x_offset: u32,
|
||||
/// Y position at which to render the following frame
|
||||
pub y_offset: u32,
|
||||
/// Frame delay fraction numerator
|
||||
pub delay_num: u16,
|
||||
/// Frame delay fraction denominator
|
||||
pub delay_den: u16,
|
||||
/// Type of frame area disposal to be done after rendering this frame
|
||||
pub dispose_op: DisposeOp,
|
||||
/// Type of frame area rendering for this frame
|
||||
pub blend_op: BlendOp,
|
||||
}
|
||||
|
||||
impl Default for FrameControl {
    /// Zero-sized frame at the origin; the 1/30 delay fraction gives a
    /// default frame rate of 30 fps.
    fn default() -> FrameControl {
        FrameControl {
            sequence_number: 0,
            width: 0,
            height: 0,
            x_offset: 0,
            y_offset: 0,
            delay_num: 1,
            delay_den: 30,
            dispose_op: DisposeOp::None,
            blend_op: BlendOp::Source,
        }
    }
}
|
||||
|
||||
impl FrameControl {
    /// Overwrites the chunk sequence number.
    pub fn set_seq_num(&mut self, s: u32) {
        self.sequence_number = s;
    }

    /// Advances the chunk sequence number by `i`.
    pub fn inc_seq_num(&mut self, i: u32) {
        self.sequence_number += i;
    }

    /// Serializes this frame control as a 26-byte big-endian `fcTL` chunk
    /// and writes it to `w`.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        // Field layout: seq(4) w(4) h(4) x(4) y(4) delay_num(2)
        // delay_den(2) dispose_op(1) blend_op(1) = 26 bytes.
        let mut data = [0u8; 26];
        data[..4].copy_from_slice(&self.sequence_number.to_be_bytes());
        data[4..8].copy_from_slice(&self.width.to_be_bytes());
        data[8..12].copy_from_slice(&self.height.to_be_bytes());
        data[12..16].copy_from_slice(&self.x_offset.to_be_bytes());
        data[16..20].copy_from_slice(&self.y_offset.to_be_bytes());
        data[20..22].copy_from_slice(&self.delay_num.to_be_bytes());
        data[22..24].copy_from_slice(&self.delay_den.to_be_bytes());
        data[24] = self.dispose_op as u8;
        data[25] = self.blend_op as u8;

        encoder::write_chunk(w, chunk::fcTL, &data)
    }
}
|
||||
|
||||
/// Animation control information
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct AnimationControl {
|
||||
/// Number of frames
|
||||
pub num_frames: u32,
|
||||
/// Number of times to loop this APNG. 0 indicates infinite looping.
|
||||
pub num_plays: u32,
|
||||
}
|
||||
|
||||
impl AnimationControl {
    /// Serializes this animation control as an 8-byte big-endian `acTL`
    /// chunk (num_frames then num_plays) and writes it to `w`.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        let mut data = [0; 8];
        data[..4].copy_from_slice(&self.num_frames.to_be_bytes());
        data[4..].copy_from_slice(&self.num_plays.to_be_bytes());
        encoder::write_chunk(w, chunk::acTL, &data)
    }
}
|
||||
|
||||
/// The type and strength of applied compression.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum Compression {
|
||||
/// Default level
|
||||
Default,
|
||||
/// Fast minimal compression
|
||||
Fast,
|
||||
/// Higher compression level
|
||||
///
|
||||
/// Best in this context isn't actually the highest possible level
|
||||
/// the encoder can do, but is meant to emulate the `Best` setting in the `Flate2`
|
||||
/// library.
|
||||
Best,
|
||||
#[deprecated(
|
||||
since = "0.17.6",
|
||||
note = "use one of the other compression levels instead, such as 'fast'"
|
||||
)]
|
||||
Huffman,
|
||||
#[deprecated(
|
||||
since = "0.17.6",
|
||||
note = "use one of the other compression levels instead, such as 'fast'"
|
||||
)]
|
||||
Rle,
|
||||
}
|
||||
|
||||
impl Default for Compression {
|
||||
fn default() -> Self {
|
||||
Self::Default
|
||||
}
|
||||
}
|
||||
|
||||
/// An unsigned integer scaled version of a floating point value,
|
||||
/// equivalent to an integer quotient with fixed denominator (100_000)).
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub struct ScaledFloat(u32);
|
||||
|
||||
impl ScaledFloat {
    // Fixed denominator mandated by the PNG gAMA/cHRM encoding.
    const SCALING: f32 = 100_000.0;

    /// Gets whether the value is within the clamped range of this type.
    pub fn in_range(value: f32) -> bool {
        value >= 0.0 && (value * Self::SCALING).floor() <= std::u32::MAX as f32
    }

    /// Gets whether the value can be exactly converted in round-trip.
    #[allow(clippy::float_cmp)] // Stupid tool, the exact float compare is _the entire point_.
    pub fn exact(value: f32) -> bool {
        let there = Self::forward(value);
        let back = Self::reverse(there);
        value == back
    }

    // float -> scaled u32; negatives clamp to 0, the rest floors.
    fn forward(value: f32) -> u32 {
        (value.max(0.0) * Self::SCALING).floor() as u32
    }

    // scaled u32 -> float (inverse of `forward`, up to rounding).
    fn reverse(encoded: u32) -> f32 {
        encoded as f32 / Self::SCALING
    }

    /// Slightly inaccurate scaling and quantization.
    /// Clamps the value into the representable range if it is negative or too large.
    pub fn new(value: f32) -> Self {
        Self(Self::forward(value))
    }

    /// Fully accurate construction from a value scaled as per specification.
    pub fn from_scaled(val: u32) -> Self {
        Self(val)
    }

    /// Get the accurate encoded value.
    pub fn into_scaled(self) -> u32 {
        self.0
    }

    /// Get the unscaled value as a floating point.
    pub fn into_value(self) -> f32 {
        Self::reverse(self.0)
    }

    // Writes the scaled value as a big-endian gAMA chunk.
    pub(crate) fn encode_gama<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        encoder::write_chunk(w, chunk::gAMA, &self.into_scaled().to_be_bytes())
    }
}
|
||||
|
||||
/// Chromaticities of the color space primaries
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub struct SourceChromaticities {
|
||||
pub white: (ScaledFloat, ScaledFloat),
|
||||
pub red: (ScaledFloat, ScaledFloat),
|
||||
pub green: (ScaledFloat, ScaledFloat),
|
||||
pub blue: (ScaledFloat, ScaledFloat),
|
||||
}
|
||||
|
||||
impl SourceChromaticities {
|
||||
pub fn new(white: (f32, f32), red: (f32, f32), green: (f32, f32), blue: (f32, f32)) -> Self {
|
||||
SourceChromaticities {
|
||||
white: (ScaledFloat::new(white.0), ScaledFloat::new(white.1)),
|
||||
red: (ScaledFloat::new(red.0), ScaledFloat::new(red.1)),
|
||||
green: (ScaledFloat::new(green.0), ScaledFloat::new(green.1)),
|
||||
blue: (ScaledFloat::new(blue.0), ScaledFloat::new(blue.1)),
|
||||
}
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub fn to_be_bytes(self) -> [u8; 32] {
|
||||
let white_x = self.white.0.into_scaled().to_be_bytes();
|
||||
let white_y = self.white.1.into_scaled().to_be_bytes();
|
||||
let red_x = self.red.0.into_scaled().to_be_bytes();
|
||||
let red_y = self.red.1.into_scaled().to_be_bytes();
|
||||
let green_x = self.green.0.into_scaled().to_be_bytes();
|
||||
let green_y = self.green.1.into_scaled().to_be_bytes();
|
||||
let blue_x = self.blue.0.into_scaled().to_be_bytes();
|
||||
let blue_y = self.blue.1.into_scaled().to_be_bytes();
|
||||
[
|
||||
white_x[0], white_x[1], white_x[2], white_x[3],
|
||||
white_y[0], white_y[1], white_y[2], white_y[3],
|
||||
red_x[0], red_x[1], red_x[2], red_x[3],
|
||||
red_y[0], red_y[1], red_y[2], red_y[3],
|
||||
green_x[0], green_x[1], green_x[2], green_x[3],
|
||||
green_y[0], green_y[1], green_y[2], green_y[3],
|
||||
blue_x[0], blue_x[1], blue_x[2], blue_x[3],
|
||||
blue_y[0], blue_y[1], blue_y[2], blue_y[3],
|
||||
]
|
||||
}
|
||||
|
||||
pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
|
||||
encoder::write_chunk(w, chunk::cHRM, &self.to_be_bytes())
|
||||
}
|
||||
}
|
||||
|
||||
/// The rendering intent for an sRGB image.
///
/// Presence of this data also indicates that the image conforms to the sRGB color space.
///
/// The discriminants match the raw byte stored in the `sRGB` chunk.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SrgbRenderingIntent {
    /// For images preferring good adaptation to the output device gamut at the expense of colorimetric accuracy, such as photographs.
    Perceptual = 0,
    /// For images requiring colour appearance matching (relative to the output device white point), such as logos.
    RelativeColorimetric = 1,
    /// For images preferring preservation of saturation at the expense of hue and lightness, such as charts and graphs.
    Saturation = 2,
    /// For images requiring preservation of absolute colorimetry, such as previews of images destined for a different output device (proofs).
    AbsoluteColorimetric = 3,
}
|
||||
|
||||
impl SrgbRenderingIntent {
|
||||
pub(crate) fn into_raw(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
pub(crate) fn from_raw(raw: u8) -> Option<Self> {
|
||||
match raw {
|
||||
0 => Some(SrgbRenderingIntent::Perceptual),
|
||||
1 => Some(SrgbRenderingIntent::RelativeColorimetric),
|
||||
2 => Some(SrgbRenderingIntent::Saturation),
|
||||
3 => Some(SrgbRenderingIntent::AbsoluteColorimetric),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
|
||||
encoder::write_chunk(w, chunk::sRGB, &[self.into_raw()])
|
||||
}
|
||||
}
|
||||
|
||||
/// PNG info struct
///
/// Holds the IHDR fields plus the contents of ancillary chunks gathered while
/// decoding, or to be written when encoding.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct Info<'a> {
    /// Width of the image in pixels.
    pub width: u32,
    /// Height of the image in pixels.
    pub height: u32,
    /// Bits per sample (per channel).
    pub bit_depth: BitDepth,
    /// How colors are stored in the image.
    pub color_type: ColorType,
    /// Whether the image data is interlaced (written as the IHDR interlace byte).
    pub interlaced: bool,
    /// The image's `tRNS` chunk, if present; contains the alpha channel of the image's palette, 1 byte per entry.
    pub trns: Option<Cow<'a, [u8]>>,
    /// Physical pixel dimensions, written as the `pHYs` chunk when present.
    pub pixel_dims: Option<PixelDimensions>,
    /// The image's `PLTE` chunk, if present; contains the RGB channels (in that order) of the image's palettes, 3 bytes per entry (1 per channel).
    pub palette: Option<Cow<'a, [u8]>>,
    /// The contents of the image's gAMA chunk, if present.
    /// Prefer `source_gamma` to also get the derived replacement gamma from sRGB chunks.
    pub gama_chunk: Option<ScaledFloat>,
    /// The contents of the image's `cHRM` chunk, if present.
    /// Prefer `source_chromaticities` to also get the derived replacements from sRGB chunks.
    pub chrm_chunk: Option<SourceChromaticities>,

    /// APNG frame control (`fcTL`) data, if any.
    pub frame_control: Option<FrameControl>,
    /// APNG animation control (`acTL`) data, if the image is animated.
    pub animation_control: Option<AnimationControl>,
    /// Compression parameter for the image data.
    pub compression: Compression,
    /// Gamma of the source system.
    /// Set by both `gAMA` as well as to a replacement by `sRGB` chunk.
    pub source_gamma: Option<ScaledFloat>,
    /// Chromaticities of the source system.
    /// Set by both `cHRM` as well as to a replacement by `sRGB` chunk.
    pub source_chromaticities: Option<SourceChromaticities>,
    /// The rendering intent of an SRGB image.
    ///
    /// Presence of this value also indicates that the image conforms to the SRGB color space.
    pub srgb: Option<SrgbRenderingIntent>,
    /// The ICC profile for the image.
    pub icc_profile: Option<Cow<'a, [u8]>>,
    /// tEXt field
    pub uncompressed_latin1_text: Vec<TEXtChunk>,
    /// zTXt field
    pub compressed_latin1_text: Vec<ZTXtChunk>,
    /// iTXt field
    pub utf8_text: Vec<ITXtChunk>,
}
|
||||
|
||||
impl Default for Info<'_> {
|
||||
fn default() -> Info<'static> {
|
||||
Info {
|
||||
width: 0,
|
||||
height: 0,
|
||||
bit_depth: BitDepth::Eight,
|
||||
color_type: ColorType::Grayscale,
|
||||
interlaced: false,
|
||||
palette: None,
|
||||
trns: None,
|
||||
gama_chunk: None,
|
||||
chrm_chunk: None,
|
||||
pixel_dims: None,
|
||||
frame_control: None,
|
||||
animation_control: None,
|
||||
// Default to `deflate::Compression::Fast` and `filter::FilterType::Sub`
|
||||
// to maintain backward compatible output.
|
||||
compression: Compression::Fast,
|
||||
source_gamma: None,
|
||||
source_chromaticities: None,
|
||||
srgb: None,
|
||||
icc_profile: None,
|
||||
uncompressed_latin1_text: Vec::new(),
|
||||
compressed_latin1_text: Vec::new(),
|
||||
utf8_text: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Info<'_> {
|
||||
/// A utility constructor for a default info with width and height.
|
||||
pub fn with_size(width: u32, height: u32) -> Self {
|
||||
Info {
|
||||
width,
|
||||
height,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Size of the image, width then height.
|
||||
pub fn size(&self) -> (u32, u32) {
|
||||
(self.width, self.height)
|
||||
}
|
||||
|
||||
/// Returns true if the image is an APNG image.
|
||||
pub fn is_animated(&self) -> bool {
|
||||
self.frame_control.is_some() && self.animation_control.is_some()
|
||||
}
|
||||
|
||||
/// Returns the frame control information of the image.
|
||||
pub fn animation_control(&self) -> Option<&AnimationControl> {
|
||||
self.animation_control.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the frame control information of the current frame
|
||||
pub fn frame_control(&self) -> Option<&FrameControl> {
|
||||
self.frame_control.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the number of bits per pixel.
|
||||
pub fn bits_per_pixel(&self) -> usize {
|
||||
self.color_type.samples() * self.bit_depth as usize
|
||||
}
|
||||
|
||||
/// Returns the number of bytes per pixel.
|
||||
pub fn bytes_per_pixel(&self) -> usize {
|
||||
// If adjusting this for expansion or other transformation passes, remember to keep the old
|
||||
// implementation for bpp_in_prediction, which is internal to the png specification.
|
||||
self.color_type.samples() * ((self.bit_depth as usize + 7) >> 3)
|
||||
}
|
||||
|
||||
/// Return the number of bytes for this pixel used in prediction.
|
||||
///
|
||||
/// Some filters use prediction, over the raw bytes of a scanline. Where a previous pixel is
|
||||
/// require for such forms the specification instead references previous bytes. That is, for
|
||||
/// a gray pixel of bit depth 2, the pixel used in prediction is actually 4 pixels prior. This
|
||||
/// has the consequence that the number of possible values is rather small. To make this fact
|
||||
/// more obvious in the type system and the optimizer we use an explicit enum here.
|
||||
pub(crate) fn bpp_in_prediction(&self) -> BytesPerPixel {
|
||||
BytesPerPixel::from_usize(self.bytes_per_pixel())
|
||||
}
|
||||
|
||||
/// Returns the number of bytes needed for one deinterlaced image.
|
||||
pub fn raw_bytes(&self) -> usize {
|
||||
self.height as usize * self.raw_row_length()
|
||||
}
|
||||
|
||||
/// Returns the number of bytes needed for one deinterlaced row.
|
||||
pub fn raw_row_length(&self) -> usize {
|
||||
self.raw_row_length_from_width(self.width)
|
||||
}
|
||||
|
||||
pub(crate) fn checked_raw_row_length(&self) -> Option<usize> {
|
||||
self.color_type
|
||||
.checked_raw_row_length(self.bit_depth, self.width)
|
||||
}
|
||||
|
||||
/// Returns the number of bytes needed for one deinterlaced row of width `width`.
|
||||
pub fn raw_row_length_from_width(&self, width: u32) -> usize {
|
||||
self.color_type
|
||||
.raw_row_length_from_width(self.bit_depth, width)
|
||||
}
|
||||
|
||||
/// Encode this header to the writer.
|
||||
///
|
||||
/// Note that this does _not_ include the PNG signature, it starts with the IHDR chunk and then
|
||||
/// includes other chunks that were added to the header.
|
||||
pub fn encode<W: Write>(&self, mut w: W) -> encoder::Result<()> {
|
||||
// Encode the IHDR chunk
|
||||
let mut data = [0; 13];
|
||||
data[..4].copy_from_slice(&self.width.to_be_bytes());
|
||||
data[4..8].copy_from_slice(&self.height.to_be_bytes());
|
||||
data[8] = self.bit_depth as u8;
|
||||
data[9] = self.color_type as u8;
|
||||
data[12] = self.interlaced as u8;
|
||||
encoder::write_chunk(&mut w, chunk::IHDR, &data)?;
|
||||
// Encode the pHYs chunk
|
||||
if let Some(pd) = self.pixel_dims {
|
||||
let mut phys_data = [0; 9];
|
||||
phys_data[0..4].copy_from_slice(&pd.xppu.to_be_bytes());
|
||||
phys_data[4..8].copy_from_slice(&pd.yppu.to_be_bytes());
|
||||
match pd.unit {
|
||||
Unit::Meter => phys_data[8] = 1,
|
||||
Unit::Unspecified => phys_data[8] = 0,
|
||||
}
|
||||
encoder::write_chunk(&mut w, chunk::pHYs, &phys_data)?;
|
||||
}
|
||||
|
||||
if let Some(p) = &self.palette {
|
||||
encoder::write_chunk(&mut w, chunk::PLTE, p)?;
|
||||
};
|
||||
|
||||
if let Some(t) = &self.trns {
|
||||
encoder::write_chunk(&mut w, chunk::tRNS, t)?;
|
||||
}
|
||||
|
||||
// If specified, the sRGB information overrides the source gamma and chromaticities.
|
||||
if let Some(srgb) = &self.srgb {
|
||||
let gamma = crate::srgb::substitute_gamma();
|
||||
let chromaticities = crate::srgb::substitute_chromaticities();
|
||||
srgb.encode(&mut w)?;
|
||||
gamma.encode_gama(&mut w)?;
|
||||
chromaticities.encode(&mut w)?;
|
||||
} else {
|
||||
if let Some(gma) = self.source_gamma {
|
||||
gma.encode_gama(&mut w)?
|
||||
}
|
||||
if let Some(chrms) = self.source_chromaticities {
|
||||
chrms.encode(&mut w)?;
|
||||
}
|
||||
}
|
||||
if let Some(actl) = self.animation_control {
|
||||
actl.encode(&mut w)?;
|
||||
}
|
||||
|
||||
for text_chunk in &self.uncompressed_latin1_text {
|
||||
text_chunk.encode(&mut w)?;
|
||||
}
|
||||
|
||||
for text_chunk in &self.compressed_latin1_text {
|
||||
text_chunk.encode(&mut w)?;
|
||||
}
|
||||
|
||||
for text_chunk in &self.utf8_text {
|
||||
text_chunk.encode(&mut w)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl BytesPerPixel {
|
||||
pub(crate) fn from_usize(bpp: usize) -> Self {
|
||||
match bpp {
|
||||
1 => BytesPerPixel::One,
|
||||
2 => BytesPerPixel::Two,
|
||||
3 => BytesPerPixel::Three,
|
||||
4 => BytesPerPixel::Four,
|
||||
6 => BytesPerPixel::Six, // Only rgb×16bit
|
||||
8 => BytesPerPixel::Eight, // Only rgba×16bit
|
||||
_ => unreachable!("Not a possible byte rounded pixel width"),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn into_usize(self) -> usize {
|
||||
self as usize
|
||||
}
|
||||
}
|
||||
|
||||
bitflags! {
    /// Output transformations
    ///
    /// Many flags from libpng are not yet supported. A PR discussing/adding them would be nice.
    /// The flag values below parallel libpng's transform constants.
    ///
    #[doc = "
```c
/// Discard the alpha channel
const STRIP_ALPHA = 0x0002; // read only
/// Expand 1; 2 and 4-bit samples to bytes
const PACKING = 0x0004; // read and write
/// Change order of packed pixels to LSB first
const PACKSWAP = 0x0008; // read and write
/// Invert monochrome images
const INVERT_MONO = 0x0020; // read and write
/// Normalize pixels to the sBIT depth
const SHIFT = 0x0040; // read and write
/// Flip RGB to BGR; RGBA to BGRA
const BGR = 0x0080; // read and write
/// Flip RGBA to ARGB or GA to AG
const SWAP_ALPHA = 0x0100; // read and write
/// Byte-swap 16-bit samples
const SWAP_ENDIAN = 0x0200; // read and write
/// Change alpha from opacity to transparency
const INVERT_ALPHA = 0x0400; // read and write
const STRIP_FILLER = 0x0800; // write only
const STRIP_FILLER_BEFORE = 0x0800; // write only
const STRIP_FILLER_AFTER = 0x1000; // write only
const GRAY_TO_RGB = 0x2000; // read only
const EXPAND_16 = 0x4000; // read only
/// Similar to STRIP_16 but in libpng considering gamma?
/// Not entirely sure the documentation says it is more
/// accurate but doesn't say precisely how.
const SCALE_16 = 0x8000; // read only
```
"]
    pub struct Transformations: u32 {
        /// No transformation
        const IDENTITY = 0x00000; // read and write */
        /// Strip 16-bit samples to 8 bits
        const STRIP_16 = 0x00001; // read only */
        /// Expand paletted images to RGB; expand grayscale images of
        /// less than 8-bit depth to 8-bit depth; and expand tRNS chunks
        /// to alpha channels.
        const EXPAND = 0x00010; // read only */
        /// Expand paletted images to include an alpha channel. Implies `EXPAND`.
        const ALPHA = 0x10000; // read only */
    }
}
|
||||
|
||||
impl Transformations {
    /// Transform every input to 8bit grayscale or color.
    ///
    /// This sets `EXPAND` and `STRIP_16` which is similar to the default transformation used by
    /// this library prior to `0.17`.
    pub fn normalize_to_color8() -> Transformations {
        Self::EXPAND | Self::STRIP_16
    }
}
|
||||
|
||||
/// Instantiate the default transformations, the identity transform.
|
||||
impl Default for Transformations {
|
||||
fn default() -> Transformations {
|
||||
Transformations::IDENTITY
|
||||
}
|
||||
}
|
||||
|
||||
/// An error caused by the caller violating an API contract;
/// see [`ParameterErrorKind`] for the specific conditions.
#[derive(Debug)]
pub struct ParameterError {
    // The concrete kind; kept private so variants can change freely.
    inner: ParameterErrorKind,
}
|
||||
|
||||
#[derive(Debug)]
pub(crate) enum ParameterErrorKind {
    /// A provided buffer must have the exact size to hold the image data. Where the buffer can
    /// be allocated by the caller, they must ensure that it has a minimum size as hinted previously.
    /// Even though the size is calculated from image data, this counts as a parameter error
    /// because they must react to a value produced by this library, which can have been subjected
    /// to limits.
    ImageBufferSize { expected: usize, actual: usize },
    /// A bit like returning `None` from an iterator.
    /// We use it to differentiate between failing to seek to the next image in a sequence and the
    /// absence of a next image. This is an error of the caller because they should have checked
    /// the number of images by inspecting the header data returned when opening the image. This
    /// library will perform the checks necessary to ensure that data was accurate or error with a
    /// format error otherwise.
    PolledAfterEndOfImage,
}
|
||||
|
||||
impl From<ParameterErrorKind> for ParameterError {
    /// Wrap the internal kind in the public error type.
    fn from(inner: ParameterErrorKind) -> Self {
        Self { inner }
    }
}
|
||||
|
||||
impl fmt::Display for ParameterError {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
use ParameterErrorKind::*;
|
||||
match self.inner {
|
||||
ImageBufferSize { expected, actual } => {
|
||||
write!(fmt, "wrong data size, expected {} got {}", expected, actual)
|
||||
}
|
||||
PolledAfterEndOfImage => write!(fmt, "End of image has been reached"),
|
||||
}
|
||||
}
|
||||
}
|
||||
811
third-party/vendor/png/src/decoder/mod.rs
vendored
Normal file
811
third-party/vendor/png/src/decoder/mod.rs
vendored
Normal file
|
|
@ -0,0 +1,811 @@
|
|||
mod stream;
|
||||
pub(crate) mod transform;
|
||||
mod zlib;
|
||||
|
||||
pub use self::stream::{DecodeOptions, Decoded, DecodingError, StreamingDecoder};
|
||||
use self::stream::{FormatErrorInner, CHUNCK_BUFFER_SIZE};
|
||||
use self::transform::{create_transform_fn, TransformFn};
|
||||
|
||||
use std::io::{BufRead, BufReader, Read};
|
||||
use std::mem;
|
||||
use std::ops::Range;
|
||||
|
||||
use crate::adam7;
|
||||
use crate::chunk;
|
||||
use crate::common::{
|
||||
BitDepth, BytesPerPixel, ColorType, Info, ParameterErrorKind, Transformations,
|
||||
};
|
||||
use crate::filter::{unfilter, FilterType};
|
||||
|
||||
/*
|
||||
pub enum InterlaceHandling {
|
||||
/// Outputs the raw rows
|
||||
RawRows,
|
||||
/// Fill missing the pixels from the existing ones
|
||||
Rectangle,
|
||||
/// Only fill the needed pixels
|
||||
Sparkle
|
||||
}
|
||||
*/
|
||||
|
||||
/// Output info.
///
/// This describes one particular frame of the image that was written into the output buffer.
#[derive(Debug, PartialEq, Eq)]
pub struct OutputInfo {
    /// The pixel width of this frame.
    pub width: u32,
    /// The pixel height of this frame.
    pub height: u32,
    /// The chosen output color type.
    pub color_type: ColorType,
    /// The chosen output bit depth.
    pub bit_depth: BitDepth,
    /// The byte count of each scan line in the image.
    pub line_size: usize,
}
|
||||
|
||||
impl OutputInfo {
    /// Returns the size needed to hold a decoded frame
    /// If the output buffer was larger than this count, bytes after this count should be
    /// ignored. They may still have been changed.
    pub fn buffer_size(&self) -> usize {
        // One scan line per image row.
        self.line_size * self.height as usize
    }
}
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
/// Limits on the resources the `Decoder` is allowed to use
pub struct Limits {
    /// Maximum number of bytes the decoder is allowed to allocate, default is 64 MiB
    pub bytes: usize,
}
|
||||
|
||||
impl Limits {
|
||||
pub(crate) fn reserve_bytes(&mut self, bytes: usize) -> Result<(), DecodingError> {
|
||||
if self.bytes >= bytes {
|
||||
self.bytes -= bytes;
|
||||
Ok(())
|
||||
} else {
|
||||
Err(DecodingError::LimitsExceeded)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Limits {
|
||||
fn default() -> Limits {
|
||||
Limits {
|
||||
bytes: 1024 * 1024 * 64,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// PNG Decoder
pub struct Decoder<R: Read> {
    /// Streaming chunk decoder together with the buffered input source.
    read_decoder: ReadDecoder<R>,
    /// Output transformations
    transform: Transformations,
}
|
||||
|
||||
/// A row of data with interlace information attached.
#[derive(Clone, Copy, Debug)]
pub struct InterlacedRow<'data> {
    /// Borrowed pixel bytes of this row.
    data: &'data [u8],
    /// Where this row belongs in the (possibly interlaced) image.
    interlace: InterlaceInfo,
}
|
||||
|
||||
impl<'data> InterlacedRow<'data> {
    /// The raw pixel data of this row.
    pub fn data(&self) -> &'data [u8] {
        self.data
    }

    /// Interlace placement of this row (`Null` or an Adam7 pass/line/width).
    pub fn interlace(&self) -> InterlaceInfo {
        self.interlace
    }
}
|
||||
|
||||
/// PNG (2003) specifies two interlace modes, but reserves future extensions.
#[derive(Clone, Copy, Debug)]
pub enum InterlaceInfo {
    /// the null method means no interlacing
    Null,
    /// Adam7 derives its name from doing 7 passes over the image, only decoding a subset of all pixels in each pass.
    /// The following table shows pictorially what parts of each 8x8 area of the image is found in each pass:
    ///
    /// 1 6 4 6 2 6 4 6
    /// 7 7 7 7 7 7 7 7
    /// 5 6 5 6 5 6 5 6
    /// 7 7 7 7 7 7 7 7
    /// 3 6 4 6 3 6 4 6
    /// 7 7 7 7 7 7 7 7
    /// 5 6 5 6 5 6 5 6
    /// 7 7 7 7 7 7 7 7
    ///
    /// `pass` is the 1-based pass number, `line` the row within the pass, and
    /// `width` the pixel width of rows in that pass.
    Adam7 { pass: u8, line: u32, width: u32 },
}
|
||||
|
||||
/// A row of data without interlace information.
#[derive(Clone, Copy, Debug)]
pub struct Row<'data> {
    /// Borrowed pixel bytes of this row.
    data: &'data [u8],
}
|
||||
|
||||
impl<'data> Row<'data> {
    /// The raw pixel data of this row.
    pub fn data(&self) -> &'data [u8] {
        self.data
    }
}
|
||||
|
||||
impl<R: Read> Decoder<R> {
|
||||
/// Create a new decoder configuration with default limits.
|
||||
pub fn new(r: R) -> Decoder<R> {
|
||||
Decoder::new_with_limits(r, Limits::default())
|
||||
}
|
||||
|
||||
/// Create a new decoder configuration with custom limits.
|
||||
pub fn new_with_limits(r: R, limits: Limits) -> Decoder<R> {
|
||||
let mut decoder = StreamingDecoder::new();
|
||||
decoder.limits = limits;
|
||||
|
||||
Decoder {
|
||||
read_decoder: ReadDecoder {
|
||||
reader: BufReader::with_capacity(CHUNCK_BUFFER_SIZE, r),
|
||||
decoder,
|
||||
at_eof: false,
|
||||
},
|
||||
transform: Transformations::IDENTITY,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new decoder configuration with custom `DecodeOptions`.
|
||||
pub fn new_with_options(r: R, decode_options: DecodeOptions) -> Decoder<R> {
|
||||
let mut decoder = StreamingDecoder::new_with_options(decode_options);
|
||||
decoder.limits = Limits::default();
|
||||
|
||||
Decoder {
|
||||
read_decoder: ReadDecoder {
|
||||
reader: BufReader::with_capacity(CHUNCK_BUFFER_SIZE, r),
|
||||
decoder,
|
||||
at_eof: false,
|
||||
},
|
||||
transform: Transformations::IDENTITY,
|
||||
}
|
||||
}
|
||||
|
||||
/// Limit resource usage.
|
||||
///
|
||||
/// Note that your allocations, e.g. when reading into a pre-allocated buffer, are __NOT__
|
||||
/// considered part of the limits. Nevertheless, required intermediate buffers such as for
|
||||
/// singular lines is checked against the limit.
|
||||
///
|
||||
/// Note that this is a best-effort basis.
|
||||
///
|
||||
/// ```
|
||||
/// use std::fs::File;
|
||||
/// use png::{Decoder, Limits};
|
||||
/// // This image is 32×32, 1bit per pixel. The reader buffers one row which requires 4 bytes.
|
||||
/// let mut limits = Limits::default();
|
||||
/// limits.bytes = 3;
|
||||
/// let mut decoder = Decoder::new_with_limits(File::open("tests/pngsuite/basi0g01.png").unwrap(), limits);
|
||||
/// assert!(decoder.read_info().is_err());
|
||||
///
|
||||
/// // This image is 32x32 pixels, so the decoder will allocate less than 10Kib
|
||||
/// let mut limits = Limits::default();
|
||||
/// limits.bytes = 10*1024;
|
||||
/// let mut decoder = Decoder::new_with_limits(File::open("tests/pngsuite/basi0g01.png").unwrap(), limits);
|
||||
/// assert!(decoder.read_info().is_ok());
|
||||
/// ```
|
||||
pub fn set_limits(&mut self, limits: Limits) {
|
||||
self.read_decoder.decoder.limits = limits;
|
||||
}
|
||||
|
||||
/// Read the PNG header and return the information contained within.
|
||||
///
|
||||
/// Most image metadata will not be read until `read_info` is called, so those fields will be
|
||||
/// None or empty.
|
||||
pub fn read_header_info(&mut self) -> Result<&Info<'static>, DecodingError> {
|
||||
let mut buf = Vec::new();
|
||||
while self.read_decoder.info().is_none() {
|
||||
buf.clear();
|
||||
if self.read_decoder.decode_next(&mut buf)?.is_none() {
|
||||
return Err(DecodingError::Format(
|
||||
FormatErrorInner::UnexpectedEof.into(),
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(self.read_decoder.info().unwrap())
|
||||
}
|
||||
|
||||
/// Reads all meta data until the first IDAT chunk
|
||||
pub fn read_info(mut self) -> Result<Reader<R>, DecodingError> {
|
||||
self.read_header_info()?;
|
||||
|
||||
let mut reader = Reader {
|
||||
decoder: self.read_decoder,
|
||||
bpp: BytesPerPixel::One,
|
||||
subframe: SubframeInfo::not_yet_init(),
|
||||
fctl_read: 0,
|
||||
next_frame: SubframeIdx::Initial,
|
||||
data_stream: Vec::new(),
|
||||
prev_start: 0,
|
||||
current_start: 0,
|
||||
transform: self.transform,
|
||||
transform_fn: None,
|
||||
scratch_buffer: Vec::new(),
|
||||
};
|
||||
|
||||
// Check if the decoding buffer of a single raw line has a valid size.
|
||||
if reader.info().checked_raw_row_length().is_none() {
|
||||
return Err(DecodingError::LimitsExceeded);
|
||||
}
|
||||
|
||||
// Check if the output buffer has a valid size.
|
||||
let (width, height) = reader.info().size();
|
||||
let (color, depth) = reader.output_color_type();
|
||||
let rowlen = color
|
||||
.checked_raw_row_length(depth, width)
|
||||
.ok_or(DecodingError::LimitsExceeded)?
|
||||
- 1;
|
||||
let height: usize =
|
||||
std::convert::TryFrom::try_from(height).map_err(|_| DecodingError::LimitsExceeded)?;
|
||||
if rowlen.checked_mul(height).is_none() {
|
||||
return Err(DecodingError::LimitsExceeded);
|
||||
}
|
||||
|
||||
reader.read_until_image_data()?;
|
||||
Ok(reader)
|
||||
}
|
||||
|
||||
/// Set the allowed and performed transformations.
|
||||
///
|
||||
/// A transformation is a pre-processing on the raw image data modifying content or encoding.
|
||||
/// Many options have an impact on memory or CPU usage during decoding.
|
||||
pub fn set_transformations(&mut self, transform: Transformations) {
|
||||
self.transform = transform;
|
||||
}
|
||||
|
||||
/// Set the decoder to ignore all text chunks while parsing.
|
||||
///
|
||||
/// eg.
|
||||
/// ```
|
||||
/// use std::fs::File;
|
||||
/// use png::Decoder;
|
||||
/// let mut decoder = Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
|
||||
/// decoder.set_ignore_text_chunk(true);
|
||||
/// assert!(decoder.read_info().is_ok());
|
||||
/// ```
|
||||
pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
|
||||
self.read_decoder
|
||||
.decoder
|
||||
.set_ignore_text_chunk(ignore_text_chunk);
|
||||
}
|
||||
|
||||
/// Set the decoder to ignore and not verify the Adler-32 checksum
|
||||
/// and CRC code.
|
||||
pub fn ignore_checksums(&mut self, ignore_checksums: bool) {
|
||||
self.read_decoder
|
||||
.decoder
|
||||
.set_ignore_adler32(ignore_checksums);
|
||||
self.read_decoder.decoder.set_ignore_crc(ignore_checksums);
|
||||
}
|
||||
}
|
||||
|
||||
/// Pairs the buffered input with the streaming chunk decoder and tracks
/// whether the end of the image has been reached.
struct ReadDecoder<R: Read> {
    reader: BufReader<R>,
    decoder: StreamingDecoder,
    // True once `Decoded::ImageEnd` has been observed.
    at_eof: bool,
}
|
||||
|
||||
impl<R: Read> ReadDecoder<R> {
|
||||
/// Returns the next decoded chunk. If the chunk is an ImageData chunk, its contents are written
|
||||
/// into image_data.
|
||||
fn decode_next(&mut self, image_data: &mut Vec<u8>) -> Result<Option<Decoded>, DecodingError> {
|
||||
while !self.at_eof {
|
||||
let (consumed, result) = {
|
||||
let buf = self.reader.fill_buf()?;
|
||||
if buf.is_empty() {
|
||||
return Err(DecodingError::Format(
|
||||
FormatErrorInner::UnexpectedEof.into(),
|
||||
));
|
||||
}
|
||||
self.decoder.update(buf, image_data)?
|
||||
};
|
||||
self.reader.consume(consumed);
|
||||
match result {
|
||||
Decoded::Nothing => (),
|
||||
Decoded::ImageEnd => self.at_eof = true,
|
||||
result => return Ok(Some(result)),
|
||||
}
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn finish_decoding(&mut self) -> Result<(), DecodingError> {
|
||||
while !self.at_eof {
|
||||
let buf = self.reader.fill_buf()?;
|
||||
if buf.is_empty() {
|
||||
return Err(DecodingError::Format(
|
||||
FormatErrorInner::UnexpectedEof.into(),
|
||||
));
|
||||
}
|
||||
let (consumed, event) = self.decoder.update(buf, &mut vec![])?;
|
||||
self.reader.consume(consumed);
|
||||
match event {
|
||||
Decoded::Nothing => (),
|
||||
Decoded::ImageEnd => self.at_eof = true,
|
||||
// ignore more data
|
||||
Decoded::ChunkComplete(_, _) | Decoded::ChunkBegin(_, _) | Decoded::ImageData => {}
|
||||
Decoded::ImageDataFlushed => return Ok(()),
|
||||
Decoded::PartialChunk(_) => {}
|
||||
new => unreachable!("{:?}", new),
|
||||
}
|
||||
}
|
||||
|
||||
Err(DecodingError::Format(
|
||||
FormatErrorInner::UnexpectedEof.into(),
|
||||
))
|
||||
}
|
||||
|
||||
fn info(&self) -> Option<&Info<'static>> {
|
||||
self.decoder.info.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// PNG reader (mostly high-level interface)
///
/// Provides a high level that iterates over lines or whole images.
pub struct Reader<R: Read> {
    /// Chunk-level decoder wrapped around the input.
    decoder: ReadDecoder<R>,
    /// Bytes per pixel as used by filter prediction (set from `bpp_in_prediction`).
    bpp: BytesPerPixel,
    /// Dimensions and interlace state of the (sub)frame currently being read.
    subframe: SubframeInfo,
    /// Number of frame control chunks read.
    /// By the APNG specification the total number must equal the count specified in the animation
    /// control chunk. The IDAT image _may_ have such a chunk applying to it.
    fctl_read: u32,
    /// Which (sub)frame the next read will produce.
    next_frame: SubframeIdx,
    /// Vec containing the uncompressed image data currently being processed.
    data_stream: Vec<u8>,
    /// Index in `data_stream` where the previous row starts.
    prev_start: usize,
    /// Index in `data_stream` where the current row starts.
    current_start: usize,
    /// Output transformations
    transform: Transformations,
    /// Function that can transform decompressed, unfiltered rows into final output.
    /// See the `transform.rs` module for more details.
    transform_fn: Option<TransformFn>,
    /// This buffer is only used so that `next_row` and `next_interlaced_row` can return reference
    /// to a byte slice. In a future version of this library, this buffer will be removed and
    /// `next_row` and `next_interlaced_row` will write directly into a user provided output buffer.
    scratch_buffer: Vec<u8>,
}
|
||||
|
||||
/// The subframe specific information.
///
/// In APNG the frames are constructed by combining previous frame and a new subframe (through a
/// combination of `dispose_op` and `overlay_op`). These sub frames specify individual dimension
/// information and reuse the global interlace options. This struct encapsulates the state of where
/// in a particular IDAT-frame or subframe we are.
struct SubframeInfo {
    // Pixel width of this subframe.
    width: u32,
    // Pixel height of this subframe.
    height: u32,
    // Byte length of one raw row — presumably from `raw_row_length_from_width`;
    // the initializer (`SubframeInfo::new`) is outside this view, confirm there.
    rowlen: usize,
    // Row iteration state: plain range or Adam7 pass iterator.
    interlace: InterlaceIter,
    // NOTE(review): appears to mark that this subframe's data stream was fully
    // consumed/flushed; verify against the uses later in the file.
    consumed_and_flushed: bool,
}
|
||||
|
||||
/// Iteration state over the rows of a (sub)frame: a plain row range when not
/// interlaced, or an Adam7 pass iterator otherwise.
#[derive(Clone)]
enum InterlaceIter {
    None(Range<u32>),
    Adam7(adam7::Adam7Iterator),
}
|
||||
|
||||
/// Denote a frame as given by sequence numbers.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum SubframeIdx {
    /// The initial frame in an IDAT chunk without fcTL chunk applying to it.
    /// Note that this variant precedes `Some` as IDAT frames precede fdAT frames and all fdAT
    /// frames must have a fcTL applying to it.
    Initial,
    /// An IDAT frame with fcTL or an fdAT frame.
    Some(u32),
    /// The past-the-end index.
    End,
}
|
||||
|
||||
impl<R: Read> Reader<R> {
|
||||
/// Reads all meta data until the next frame data starts.
/// Requires IHDR before the IDAT and fcTL before fdAT.
fn read_until_image_data(&mut self) -> Result<(), DecodingError> {
    loop {
        // This is somewhat ugly. The API requires us to pass a buffer to decode_next but we
        // know that we will stop before reading any image data from the stream. Thus pass an
        // empty buffer and assert that remains empty.
        let mut buf = Vec::new();
        let state = self.decoder.decode_next(&mut buf)?;
        assert!(buf.is_empty());

        match state {
            // Start of image data: the metadata prelude for this frame is complete.
            Some(Decoded::ChunkBegin(_, chunk::IDAT))
            | Some(Decoded::ChunkBegin(_, chunk::fdAT)) => break,
            Some(Decoded::FrameControl(_)) => {
                self.subframe = SubframeInfo::new(self.info());
                // The next frame is the one to which this chunk applies.
                self.next_frame = SubframeIdx::Some(self.fctl_read);
                // TODO: what about overflow here? That would imply there are more fctl chunks
                // than can be specified in the animation control but also that we have read
                // several gigabytes of data.
                self.fctl_read += 1;
            }
            None => {
                // End of stream before any image data was announced.
                return Err(DecodingError::Format(
                    FormatErrorInner::MissingImageData.into(),
                ))
            }
            // Ignore all other chunk events. Any other chunk may be between IDAT chunks, fdAT
            // chunks and their control chunks.
            _ => {}
        }
    }

    let info = self
        .decoder
        .info()
        .ok_or(DecodingError::Format(FormatErrorInner::MissingIhdr.into()))?;
    self.bpp = info.bpp_in_prediction();
    self.subframe = SubframeInfo::new(info);

    // Allocate output buffer.
    let buflen = self.output_line_size(self.subframe.width);
    self.decoder.decoder.limits.reserve_bytes(buflen)?;

    self.prev_start = self.current_start;

    Ok(())
}
|
||||
|
||||
/// Get information on the image.
|
||||
///
|
||||
/// The structure will change as new frames of an animated image are decoded.
|
||||
pub fn info(&self) -> &Info<'static> {
|
||||
self.decoder.info().unwrap()
|
||||
}
|
||||
|
||||
/// Decodes the next frame into `buf`.
|
||||
///
|
||||
/// Note that this decodes raw subframes that need to be mixed according to blend-op and
|
||||
/// dispose-op by the caller.
|
||||
///
|
||||
/// The caller must always provide a buffer large enough to hold a complete frame (the APNG
|
||||
/// specification restricts subframes to the dimensions given in the image header). The region
|
||||
/// that has been written be checked afterwards by calling `info` after a successful call and
|
||||
/// inspecting the `frame_control` data. This requirement may be lifted in a later version of
|
||||
/// `png`.
|
||||
///
|
||||
/// Output lines will be written in row-major, packed matrix with width and height of the read
|
||||
/// frame (or subframe), all samples are in big endian byte order where this matters.
|
||||
pub fn next_frame(&mut self, buf: &mut [u8]) -> Result<OutputInfo, DecodingError> {
|
||||
let subframe_idx = match self.decoder.info().unwrap().frame_control() {
|
||||
None => SubframeIdx::Initial,
|
||||
Some(_) => SubframeIdx::Some(self.fctl_read - 1),
|
||||
};
|
||||
|
||||
if self.next_frame == SubframeIdx::End {
|
||||
return Err(DecodingError::Parameter(
|
||||
ParameterErrorKind::PolledAfterEndOfImage.into(),
|
||||
));
|
||||
} else if self.next_frame != subframe_idx {
|
||||
// Advance until we've read the info / fcTL for this frame.
|
||||
self.read_until_image_data()?;
|
||||
}
|
||||
|
||||
if buf.len() < self.output_buffer_size() {
|
||||
return Err(DecodingError::Parameter(
|
||||
ParameterErrorKind::ImageBufferSize {
|
||||
expected: buf.len(),
|
||||
actual: self.output_buffer_size(),
|
||||
}
|
||||
.into(),
|
||||
));
|
||||
}
|
||||
|
||||
let (color_type, bit_depth) = self.output_color_type();
|
||||
let output_info = OutputInfo {
|
||||
width: self.subframe.width,
|
||||
height: self.subframe.height,
|
||||
color_type,
|
||||
bit_depth,
|
||||
line_size: self.output_line_size(self.subframe.width),
|
||||
};
|
||||
|
||||
self.data_stream.clear();
|
||||
self.current_start = 0;
|
||||
self.prev_start = 0;
|
||||
let width = self.info().width;
|
||||
if self.info().interlaced {
|
||||
while let Some(InterlacedRow {
|
||||
data: row,
|
||||
interlace,
|
||||
..
|
||||
}) = self.next_interlaced_row()?
|
||||
{
|
||||
let (line, pass) = match interlace {
|
||||
InterlaceInfo::Adam7 { line, pass, .. } => (line, pass),
|
||||
InterlaceInfo::Null => unreachable!("expected interlace information"),
|
||||
};
|
||||
let samples = color_type.samples() as u8;
|
||||
adam7::expand_pass(buf, width, row, pass, line, samples * (bit_depth as u8));
|
||||
}
|
||||
} else {
|
||||
for row in buf
|
||||
.chunks_exact_mut(output_info.line_size)
|
||||
.take(self.subframe.height as usize)
|
||||
{
|
||||
self.next_interlaced_row_impl(self.subframe.rowlen, row)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Advance over the rest of data for this (sub-)frame.
|
||||
if !self.subframe.consumed_and_flushed {
|
||||
self.decoder.finish_decoding()?;
|
||||
}
|
||||
|
||||
// Advance our state to expect the next frame.
|
||||
let past_end_subframe = self
|
||||
.info()
|
||||
.animation_control()
|
||||
.map(|ac| ac.num_frames)
|
||||
.unwrap_or(0);
|
||||
self.next_frame = match self.next_frame {
|
||||
SubframeIdx::End => unreachable!("Next frame called when already at image end"),
|
||||
// Reached the end of non-animated image.
|
||||
SubframeIdx::Initial if past_end_subframe == 0 => SubframeIdx::End,
|
||||
// An animated image, expecting first subframe.
|
||||
SubframeIdx::Initial => SubframeIdx::Some(0),
|
||||
// This was the last subframe, slightly fuzzy condition in case of programmer error.
|
||||
SubframeIdx::Some(idx) if past_end_subframe <= idx + 1 => SubframeIdx::End,
|
||||
// Expecting next subframe.
|
||||
SubframeIdx::Some(idx) => SubframeIdx::Some(idx + 1),
|
||||
};
|
||||
|
||||
Ok(output_info)
|
||||
}
|
||||
|
||||
/// Returns the next processed row of the image
|
||||
pub fn next_row(&mut self) -> Result<Option<Row>, DecodingError> {
|
||||
self.next_interlaced_row()
|
||||
.map(|v| v.map(|v| Row { data: v.data }))
|
||||
}
|
||||
|
||||
/// Returns the next processed row of the image
pub fn next_interlaced_row(&mut self) -> Result<Option<InterlacedRow>, DecodingError> {
    // `None` from `next_pass` means every row of the (sub)frame has been produced.
    let (rowlen, interlace) = match self.next_pass() {
        Some((rowlen, interlace)) => (rowlen, interlace),
        None => return Ok(None),
    };

    // For Adam7 passes the row is only as wide as the pass, not the full image.
    let width = if let InterlaceInfo::Adam7 { width, .. } = interlace {
        width
    } else {
        self.subframe.width
    };
    let output_line_size = self.output_line_size(width);

    // TODO: change the interface of `next_interlaced_row` to take an output buffer instead of
    // making us return a reference to a buffer that we own.
    let mut output_buffer = mem::take(&mut self.scratch_buffer);
    output_buffer.resize(output_line_size, 0u8);
    let ret = self.next_interlaced_row_impl(rowlen, &mut output_buffer);
    // Restore the scratch buffer before propagating errors so it is reused next call.
    self.scratch_buffer = output_buffer;
    ret?;

    Ok(Some(InterlacedRow {
        data: &self.scratch_buffer[..output_line_size],
        interlace,
    }))
}
|
||||
|
||||
/// Read the rest of the image and chunks and finish up, including text chunks or others
|
||||
/// This will discard the rest of the image if the image is not read already with [`Reader::next_frame`], [`Reader::next_row`] or [`Reader::next_interlaced_row`]
|
||||
pub fn finish(&mut self) -> Result<(), DecodingError> {
|
||||
self.next_frame = SubframeIdx::End;
|
||||
self.data_stream.clear();
|
||||
self.current_start = 0;
|
||||
self.prev_start = 0;
|
||||
loop {
|
||||
let mut buf = Vec::new();
|
||||
let state = self.decoder.decode_next(&mut buf)?;
|
||||
|
||||
if state.is_none() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch the next interlaced row and filter it according to our own transformations.
fn next_interlaced_row_impl(
    &mut self,
    rowlen: usize,
    output_buffer: &mut [u8],
) -> Result<(), DecodingError> {
    self.next_raw_interlaced_row(rowlen)?;
    // `rowlen` counts the filter-type byte; the unfiltered pixel data is one byte shorter.
    assert_eq!(self.current_start - self.prev_start, rowlen - 1);
    let row = &self.data_stream[self.prev_start..self.current_start];

    // Apply transformations and write resulting data to buffer.
    let transform_fn = {
        // Lazily build the transform on first use - it depends on header metadata
        // (palette, tRNS, bit depth) that is guaranteed present by now.
        if self.transform_fn.is_none() {
            self.transform_fn = Some(create_transform_fn(self.info(), self.transform)?);
        }
        self.transform_fn.as_deref().unwrap()
    };
    transform_fn(row, output_buffer, self.info());

    Ok(())
}
|
||||
|
||||
/// Returns the color type and the number of bits per sample
/// of the data returned by `Reader::next_row` and Reader::frames`.
pub fn output_color_type(&self) -> (ColorType, BitDepth) {
    use crate::common::ColorType::*;
    let t = self.transform;
    let info = self.info();
    if t == Transformations::IDENTITY {
        // No transformations requested: output matches the stored format exactly.
        (info.color_type, info.bit_depth)
    } else {
        let bits = match info.bit_depth as u8 {
            // STRIP_16 halves 16-bit samples down to 8 bits.
            16 if t.intersects(Transformations::STRIP_16) => 8,
            // EXPAND/ALPHA promote sub-byte samples to one full byte each.
            n if n < 8
                && (t.contains(Transformations::EXPAND)
                    || t.contains(Transformations::ALPHA)) =>
            {
                8
            }
            n => n,
        };
        let color_type =
            if t.contains(Transformations::EXPAND) || t.contains(Transformations::ALPHA) {
                // tRNS data (or a forced ALPHA transform) yields an extra alpha channel.
                let has_trns = info.trns.is_some() || t.contains(Transformations::ALPHA);
                match info.color_type {
                    Grayscale if has_trns => GrayscaleAlpha,
                    Rgb if has_trns => Rgba,
                    Indexed if has_trns => Rgba,
                    Indexed => Rgb,
                    ct => ct,
                }
            } else {
                info.color_type
            };
        (color_type, BitDepth::from_u8(bits).unwrap())
    }
}
|
||||
|
||||
/// Returns the number of bytes required to hold a deinterlaced image frame
|
||||
/// that is decoded using the given input transformations.
|
||||
pub fn output_buffer_size(&self) -> usize {
|
||||
let (width, height) = self.info().size();
|
||||
let size = self.output_line_size(width);
|
||||
size * height as usize
|
||||
}
|
||||
|
||||
/// Returns the number of bytes required to hold a deinterlaced row.
|
||||
pub fn output_line_size(&self, width: u32) -> usize {
|
||||
let (color, depth) = self.output_color_type();
|
||||
color.raw_row_length_from_width(depth, width) - 1
|
||||
}
|
||||
|
||||
/// Advances the interlace iterator one row.
///
/// Returns the raw length (including the filter byte) of the next row together with its
/// interlace placement, or `None` once all rows of the (sub)frame have been produced.
fn next_pass(&mut self) -> Option<(usize, InterlaceInfo)> {
    match self.subframe.interlace {
        InterlaceIter::Adam7(ref mut adam7) => {
            let last_pass = adam7.current_pass();
            let (pass, line, width) = adam7.next()?;
            let rowlen = self.info().raw_row_length_from_width(width);
            if last_pass != pass {
                // A new pass starts without a "previous" row: unfiltering must not
                // reference the last row of the preceding pass.
                self.prev_start = self.current_start;
            }
            Some((rowlen, InterlaceInfo::Adam7 { pass, line, width }))
        }
        InterlaceIter::None(ref mut height) => {
            // Non-interlaced: the range simply counts down the remaining rows.
            let _ = height.next()?;
            Some((self.subframe.rowlen, InterlaceInfo::Null))
        }
    }
}
|
||||
|
||||
/// Write the next raw interlaced row into `self.prev`.
///
/// The scanline is filtered against the previous scanline according to the specification.
fn next_raw_interlaced_row(&mut self, rowlen: usize) -> Result<(), DecodingError> {
    // Read image data until we have at least one full row (but possibly more than one).
    while self.data_stream.len() - self.current_start < rowlen {
        if self.subframe.consumed_and_flushed {
            // All compressed data of this (sub)frame was already consumed.
            return Err(DecodingError::Format(
                FormatErrorInner::NoMoreImageData.into(),
            ));
        }

        // Clear the current buffer before appending more data.
        if self.prev_start > 0 {
            // Shift the still-needed tail (previous row + partial current row) to the
            // front so the stream buffer does not grow without bound.
            self.data_stream.copy_within(self.prev_start.., 0);
            self.data_stream
                .truncate(self.data_stream.len() - self.prev_start);
            self.current_start -= self.prev_start;
            self.prev_start = 0;
        }

        match self.decoder.decode_next(&mut self.data_stream)? {
            Some(Decoded::ImageData) => {}
            Some(Decoded::ImageDataFlushed) => {
                // Decoder signalled the end of this (sub)frame's compressed stream.
                self.subframe.consumed_and_flushed = true;
            }
            None => {
                return Err(DecodingError::Format(
                    if self.data_stream.is_empty() {
                        FormatErrorInner::NoMoreImageData
                    } else {
                        FormatErrorInner::UnexpectedEndOfChunk
                    }
                    .into(),
                ));
            }
            _ => (),
        }
    }

    // Get a reference to the current row and point scan_start to the next one.
    let (prev, row) = self.data_stream.split_at_mut(self.current_start);

    // Unfilter the row.
    let filter = FilterType::from_u8(row[0]).ok_or(DecodingError::Format(
        FormatErrorInner::UnknownFilterMethod(row[0]).into(),
    ))?;
    unfilter(
        filter,
        self.bpp,
        &prev[self.prev_start..],
        &mut row[1..rowlen],
    );

    // `prev_start` skips the filter-type byte: the unfiltered pixels begin right after it.
    self.prev_start = self.current_start + 1;
    self.current_start += rowlen;

    Ok(())
}
|
||||
}
|
||||
|
||||
impl SubframeInfo {
|
||||
fn not_yet_init() -> Self {
|
||||
SubframeInfo {
|
||||
width: 0,
|
||||
height: 0,
|
||||
rowlen: 0,
|
||||
interlace: InterlaceIter::None(0..0),
|
||||
consumed_and_flushed: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn new(info: &Info) -> Self {
|
||||
// The apng fctnl overrides width and height.
|
||||
// All other data is set by the main info struct.
|
||||
let (width, height) = if let Some(fc) = info.frame_control {
|
||||
(fc.width, fc.height)
|
||||
} else {
|
||||
(info.width, info.height)
|
||||
};
|
||||
|
||||
let interlace = if info.interlaced {
|
||||
InterlaceIter::Adam7(adam7::Adam7Iterator::new(width, height))
|
||||
} else {
|
||||
InterlaceIter::None(0..height)
|
||||
};
|
||||
|
||||
SubframeInfo {
|
||||
width,
|
||||
height,
|
||||
rowlen: info.raw_row_length_from_width(width),
|
||||
interlace,
|
||||
consumed_and_flushed: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
1948
third-party/vendor/png/src/decoder/stream.rs
vendored
Normal file
1948
third-party/vendor/png/src/decoder/stream.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
203
third-party/vendor/png/src/decoder/transform.rs
vendored
Normal file
203
third-party/vendor/png/src/decoder/transform.rs
vendored
Normal file
|
|
@ -0,0 +1,203 @@
|
|||
//! Transforming a decompressed, unfiltered row into the final output.
|
||||
|
||||
mod palette;
|
||||
|
||||
use crate::{BitDepth, ColorType, DecodingError, Info, Transformations};
|
||||
|
||||
use super::stream::FormatErrorInner;
|
||||
|
||||
/// Type of a function that can transform a decompressed, unfiltered row (the
/// 1st argument) into the final pixels (the 2nd argument), optionally using
/// image metadata (e.g. PLTE data can be accessed using the 3rd argument).
///
/// TODO: If some precomputed state is needed (e.g. to make `expand_paletted...`
/// faster) then consider changing this into `Box<dyn Fn(...)>`.
pub type TransformFn = Box<dyn Fn(&[u8], &mut [u8], &Info) + Send + Sync>;
|
||||
|
||||
/// Returns a transformation function that should be applied to image rows based
/// on 1) decoded image metadata (`info`) and 2) the transformations requested
/// by the crate client (`transform`).
pub fn create_transform_fn(
    info: &Info,
    transform: Transformations,
) -> Result<TransformFn, DecodingError> {
    let color_type = info.color_type;
    let bit_depth = info.bit_depth as u8;
    // ALPHA implies adding an alpha channel even without tRNS data.
    let trns = info.trns.is_some() || transform.contains(Transformations::ALPHA);
    // ALPHA implies EXPAND (alpha can only be added to expanded output).
    let expand =
        transform.contains(Transformations::EXPAND) || transform.contains(Transformations::ALPHA);
    let strip16 = bit_depth == 16 && transform.contains(Transformations::STRIP_16);
    match color_type {
        ColorType::Indexed if expand => {
            if info.palette.is_none() {
                // Cannot expand indexed pixels without a PLTE chunk.
                return Err(DecodingError::Format(
                    FormatErrorInner::PaletteRequired.into(),
                ));
            } else if let BitDepth::Sixteen = info.bit_depth {
                // This should have been caught earlier but let's check again. Can't hurt.
                return Err(DecodingError::Format(
                    FormatErrorInner::InvalidColorBitDepth {
                        color_type: ColorType::Indexed,
                        bit_depth: BitDepth::Sixteen,
                    }
                    .into(),
                ));
            } else {
                Ok(if trns {
                    palette::create_expansion_into_rgba8(info)
                } else {
                    palette::create_expansion_into_rgb8(info)
                })
            }
        }
        ColorType::Grayscale | ColorType::GrayscaleAlpha if bit_depth < 8 && expand => {
            // Sub-byte grayscale samples are widened to one byte (plus optional alpha).
            Ok(Box::new(if trns {
                expand_gray_u8_with_trns
            } else {
                expand_gray_u8
            }))
        }
        ColorType::Grayscale | ColorType::Rgb if expand && trns => {
            // Full-byte samples: only an alpha channel needs to be synthesized from tRNS.
            Ok(Box::new(if bit_depth == 8 {
                expand_trns_line
            } else if strip16 {
                expand_trns_and_strip_line16
            } else {
                assert_eq!(bit_depth, 16);
                expand_trns_line16
            }))
        }
        ColorType::Grayscale | ColorType::GrayscaleAlpha | ColorType::Rgb | ColorType::Rgba
            if strip16 =>
        {
            // Only 16->8 bit stripping requested.
            Ok(Box::new(transform_row_strip16))
        }
        // No applicable transformation: pass rows through unchanged.
        _ => Ok(Box::new(copy_row)),
    }
}
|
||||
|
||||
/// Identity transform: copies the unfiltered row into the output unchanged.
/// Panics (via `copy_from_slice`) if the two buffers differ in length.
fn copy_row(row: &[u8], output_buffer: &mut [u8], _: &Info) {
    output_buffer.copy_from_slice(row);
}
|
||||
|
||||
fn transform_row_strip16(row: &[u8], output_buffer: &mut [u8], _: &Info) {
|
||||
for i in 0..row.len() / 2 {
|
||||
output_buffer[i] = row[2 * i];
|
||||
}
|
||||
}
|
||||
|
||||
/// Unpacks `bit_depth`-bit samples from `input` (MSB-first within each byte) and
/// hands each sample to `func` together with the next `channels`-wide output chunk.
#[inline(always)]
fn unpack_bits<F>(input: &[u8], output: &mut [u8], channels: usize, bit_depth: u8, func: F)
where
    F: Fn(u8, &mut [u8]),
{
    // Only [1, 2, 4, 8] are valid bit depths
    assert!(matches!(bit_depth, 1 | 2 | 4 | 8));
    // Check that `input` is capable of producing a buffer as long as `output`:
    // number of shift lookups per bit depth * channels * input length
    assert!((8 / bit_depth as usize * channels).saturating_mul(input.len()) >= output.len());

    let mut pixel_chunks = output.chunks_exact_mut(channels);
    let mut bytes = input.iter();

    if bit_depth == 8 {
        // Whole-byte samples: one input byte per output chunk.
        for (&byte, chunk) in bytes.zip(&mut pixel_chunks) {
            func(byte, chunk);
        }
    } else {
        // Sub-byte samples: walk each byte MSB-first with an explicit shift.
        // For bit depth d the shift sequence per byte is 8-d, 8-2d, ..., 0;
        // explicit shifts optimize better than `(0..8).step_by(d).rev()`.
        let mask = ((1u16 << bit_depth) - 1) as u8;
        let step = bit_depth as i32;

        // A negative shift signals that the next input byte must be fetched.
        let mut shift = -1i32;
        let mut byte = 0u8;

        for chunk in pixel_chunks {
            if shift < 0 {
                shift = 8 - step;
                byte = *bytes.next().expect("input for unpack bits is not empty");
            }

            func((byte >> shift) & mask, chunk);
            shift -= step;
        }
    }
}
|
||||
|
||||
fn expand_trns_line(input: &[u8], output: &mut [u8], info: &Info) {
|
||||
let channels = info.color_type.samples();
|
||||
let trns = info.trns.as_deref();
|
||||
for (input, output) in input
|
||||
.chunks_exact(channels)
|
||||
.zip(output.chunks_exact_mut(channels + 1))
|
||||
{
|
||||
output[..channels].copy_from_slice(input);
|
||||
output[channels] = if Some(input) == trns { 0 } else { 0xFF };
|
||||
}
|
||||
}
|
||||
|
||||
fn expand_trns_line16(input: &[u8], output: &mut [u8], info: &Info) {
|
||||
let channels = info.color_type.samples();
|
||||
let trns = info.trns.as_deref();
|
||||
for (input, output) in input
|
||||
.chunks_exact(channels * 2)
|
||||
.zip(output.chunks_exact_mut(channels * 2 + 2))
|
||||
{
|
||||
output[..channels * 2].copy_from_slice(input);
|
||||
if Some(input) == trns {
|
||||
output[channels * 2] = 0;
|
||||
output[channels * 2 + 1] = 0
|
||||
} else {
|
||||
output[channels * 2] = 0xFF;
|
||||
output[channels * 2 + 1] = 0xFF
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
fn expand_trns_and_strip_line16(input: &[u8], output: &mut [u8], info: &Info) {
|
||||
let channels = info.color_type.samples();
|
||||
let trns = info.trns.as_deref();
|
||||
for (input, output) in input
|
||||
.chunks_exact(channels * 2)
|
||||
.zip(output.chunks_exact_mut(channels + 1))
|
||||
{
|
||||
for i in 0..channels {
|
||||
output[i] = input[i * 2];
|
||||
}
|
||||
output[channels] = if Some(input) == trns { 0 } else { 0xFF };
|
||||
}
|
||||
}
|
||||
|
||||
/// Expands sub-byte grayscale samples to one byte each, rescaled so that the
/// maximum representable input value maps to 255 (e.g. a 1-bit `1` becomes 0xFF).
fn expand_gray_u8(row: &[u8], buffer: &mut [u8], info: &Info) {
    // 255 / (2^depth - 1): e.g. 255 for depth 1, 85 for depth 2, 17 for depth 4.
    let scaling_factor = (255) / ((1u16 << info.bit_depth as u8) - 1) as u8;
    unpack_bits(row, buffer, 1, info.bit_depth as u8, |val, chunk| {
        chunk[0] = val * scaling_factor
    });
}
|
||||
|
||||
fn expand_gray_u8_with_trns(row: &[u8], buffer: &mut [u8], info: &Info) {
|
||||
let scaling_factor = (255) / ((1u16 << info.bit_depth as u8) - 1) as u8;
|
||||
let trns = info.trns.as_deref();
|
||||
unpack_bits(row, buffer, 2, info.bit_depth as u8, |pixel, chunk| {
|
||||
chunk[1] = if let Some(trns) = trns {
|
||||
if pixel == trns[0] {
|
||||
0
|
||||
} else {
|
||||
0xFF
|
||||
}
|
||||
} else {
|
||||
0xFF
|
||||
};
|
||||
chunk[0] = pixel * scaling_factor
|
||||
});
|
||||
}
|
||||
361
third-party/vendor/png/src/decoder/transform/palette.rs
vendored
Normal file
361
third-party/vendor/png/src/decoder/transform/palette.rs
vendored
Normal file
|
|
@ -0,0 +1,361 @@
|
|||
//! Helpers for taking a slice of indeces (indices into `PLTE` and/or `trNS`
|
||||
//! entries) and transforming this into RGB or RGBA output.
|
||||
//!
|
||||
//! # Memoization
|
||||
//!
|
||||
//! To achieve higher throughput, `create_rgba_palette` combines entries from
|
||||
//! `PLTE` and `trNS` chunks into a single lookup table. This is based on the
|
||||
//! ideas explored in https://crbug.com/706134.
|
||||
//!
|
||||
//! Memoization is a trade-off:
|
||||
//! * On one hand, memoization requires spending X ns before starting to call
|
||||
//! `expand_paletted_...` functions.
|
||||
//! * On the other hand, memoization improves the throughput of the
|
||||
//! `expand_paletted_...` functions - they take Y ns less to process each byte
|
||||
//!
|
||||
//! Based on X and Y, we can try to calculate the breakeven point. It seems
|
||||
//! that memoization is a net benefit for images bigger than around 13x13 pixels.
|
||||
|
||||
use super::{unpack_bits, TransformFn};
|
||||
use crate::{BitDepth, Info};
|
||||
|
||||
/// Builds a transform that expands palette indices into RGB8 output.
///
/// The memoized PLTE+tRNS lookup table is computed once here and moved into the
/// returned closure; 8-bit indices take a dedicated fast path, smaller bit
/// depths go through the generic bit-unpacking path.
pub fn create_expansion_into_rgb8(info: &Info) -> TransformFn {
    let rgba_palette = create_rgba_palette(info);

    if info.bit_depth == BitDepth::Eight {
        Box::new(move |input, output, _info| expand_8bit_into_rgb8(input, output, &rgba_palette))
    } else {
        Box::new(move |input, output, info| expand_into_rgb8(input, output, info, &rgba_palette))
    }
}
|
||||
|
||||
/// Builds a transform that expands palette indices into RGBA8 output.
///
/// The memoized PLTE+tRNS lookup table is computed once here and moved into the
/// returned closure.
pub fn create_expansion_into_rgba8(info: &Info) -> TransformFn {
    let rgba_palette = create_rgba_palette(info);
    Box::new(move |input, output, info| {
        expand_paletted_into_rgba8(input, output, info, &rgba_palette)
    })
}
|
||||
|
||||
/// Combines the PLTE (RGB) and tRNS (alpha) chunks into a single 256-entry RGBA
/// lookup table. Entries beyond the palette length stay black and opaque.
fn create_rgba_palette(info: &Info) -> [[u8; 4]; 256] {
    let palette = info.palette.as_deref().expect("Caller should verify");
    let trns = info.trns.as_deref().unwrap_or(&[]);

    // > The tRNS chunk shall not contain more alpha values than there are palette
    // entries, but a tRNS chunk may contain fewer values than there are palette
    // entries. In this case, the alpha value for all remaining palette entries is
    // assumed to be 255.
    //
    // It seems, accepted reading is to fully *ignore* an invalid tRNS as if it were
    // completely empty / all pixels are non-transparent.
    let trns = if trns.len() <= palette.len() / 3 {
        trns
    } else {
        &[]
    };

    // Default to black, opaque entries.
    let mut rgba_palette = [[0, 0, 0, 0xFF]; 256];

    // Copy `palette` (RGB) entries into `rgba_palette`. This may clobber alpha
    // values in `rgba_palette` - we need to fix this later.
    {
        let mut palette_iter = palette;
        let mut rgba_iter = &mut rgba_palette[..];
        while palette_iter.len() >= 4 {
            // Copying 4 bytes at a time is more efficient than copying 3.
            // OTOH, this clobbers the alpha value in `rgba_iter[0][3]` - we
            // need to fix this later.
            rgba_iter[0].copy_from_slice(&palette_iter[0..4]);

            // Advance by 3 palette bytes (one RGB entry) but one RGBA entry.
            palette_iter = &palette_iter[3..];
            rgba_iter = &mut rgba_iter[1..];
        }
        if palette_iter.len() > 0 {
            // Final entry (fewer than 4 bytes left): plain 3-byte copy.
            rgba_iter[0][0..3].copy_from_slice(&palette_iter[0..3]);
        }
    }

    // Copy `trns` (alpha) entries into `rgba_palette`. `trns.len()` may be
    // smaller than `palette.len()` and therefore this is not sufficient to fix
    // all the clobbered alpha values.
    for (alpha, rgba) in trns.iter().copied().zip(rgba_palette.iter_mut()) {
        rgba[3] = alpha;
    }

    // Unclobber the remaining alpha values.
    for rgba in rgba_palette[trns.len()..(palette.len() / 3)].iter_mut() {
        rgba[3] = 0xFF;
    }

    rgba_palette
}
|
||||
|
||||
/// Fast path: expands 8-bit palette indices into RGB8 output.
///
/// Each 4-byte palette copy intentionally writes one byte past the 3-byte pixel;
/// that extra byte is overwritten by the next pixel, and the final pixel falls
/// back to a 3-byte copy so the output bounds are respected.
fn expand_8bit_into_rgb8(mut input: &[u8], mut output: &mut [u8], rgba_palette: &[[u8; 4]; 256]) {
    while output.len() >= 4 {
        // Copying 4 bytes at a time is more efficient than 3.
        let rgba = &rgba_palette[input[0] as usize];
        output[0..4].copy_from_slice(rgba);

        // Advance the output by only 3 bytes - the pixel size without alpha.
        input = &input[1..];
        output = &mut output[3..];
    }
    if output.len() > 0 {
        // Last pixel: a 4-byte copy would run past the end of `output`.
        let rgba = &rgba_palette[input[0] as usize];
        output[0..3].copy_from_slice(&rgba[0..3]);
    }
}
|
||||
|
||||
/// Generic path: expands (possibly sub-byte) palette indices into RGB8 output
/// via the memoized lookup table; the alpha entry of the table is ignored.
fn expand_into_rgb8(row: &[u8], buffer: &mut [u8], info: &Info, rgba_palette: &[[u8; 4]; 256]) {
    unpack_bits(row, buffer, 3, info.bit_depth as u8, |i, chunk| {
        let rgba = &rgba_palette[i as usize];
        chunk[0] = rgba[0];
        chunk[1] = rgba[1];
        chunk[2] = rgba[2];
    })
}
|
||||
|
||||
/// Expands (possibly sub-byte) palette indices into RGBA8 output via the
/// memoized PLTE+tRNS lookup table.
fn expand_paletted_into_rgba8(
    row: &[u8],
    buffer: &mut [u8],
    info: &Info,
    rgba_palette: &[[u8; 4]; 256],
) {
    unpack_bits(row, buffer, 4, info.bit_depth as u8, |i, chunk| {
        chunk.copy_from_slice(&rgba_palette[i as usize]);
    });
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::{BitDepth, ColorType, Info, Transformations};
|
||||
|
||||
/// Old, non-memoized version of the code is used as a test oracle.
|
||||
fn oracle_expand_paletted_into_rgb8(row: &[u8], buffer: &mut [u8], info: &Info) {
|
||||
let palette = info.palette.as_deref().expect("Caller should verify");
|
||||
let black = [0, 0, 0];
|
||||
|
||||
super::unpack_bits(row, buffer, 3, info.bit_depth as u8, |i, chunk| {
|
||||
let rgb = palette
|
||||
.get(3 * i as usize..3 * i as usize + 3)
|
||||
.unwrap_or(&black);
|
||||
chunk[0] = rgb[0];
|
||||
chunk[1] = rgb[1];
|
||||
chunk[2] = rgb[2];
|
||||
})
|
||||
}
|
||||
|
||||
/// Old, non-memoized version of the code is used as a test oracle.
|
||||
fn oracle_expand_paletted_into_rgba8(row: &[u8], buffer: &mut [u8], info: &Info) {
|
||||
let palette = info.palette.as_deref().expect("Caller should verify");
|
||||
let trns = info.trns.as_deref().unwrap_or(&[]);
|
||||
let black = [0, 0, 0];
|
||||
|
||||
// > The tRNS chunk shall not contain more alpha values than there are palette
|
||||
// entries, but a tRNS chunk may contain fewer values than there are palette
|
||||
// entries. In this case, the alpha value for all remaining palette entries is
|
||||
// assumed to be 255.
|
||||
//
|
||||
// It seems, accepted reading is to fully *ignore* an invalid tRNS as if it were
|
||||
// completely empty / all pixels are non-transparent.
|
||||
let trns = if trns.len() <= palette.len() / 3 {
|
||||
trns
|
||||
} else {
|
||||
&[]
|
||||
};
|
||||
|
||||
super::unpack_bits(row, buffer, 4, info.bit_depth as u8, |i, chunk| {
|
||||
let (rgb, a) = (
|
||||
palette
|
||||
.get(3 * i as usize..3 * i as usize + 3)
|
||||
.unwrap_or(&black),
|
||||
*trns.get(i as usize).unwrap_or(&0xFF),
|
||||
);
|
||||
chunk[0] = rgb[0];
|
||||
chunk[1] = rgb[1];
|
||||
chunk[2] = rgb[2];
|
||||
chunk[3] = a;
|
||||
});
|
||||
}
|
||||
|
||||
fn create_info<'a>(src_bit_depth: u8, palette: &'a [u8], trns: Option<&'a [u8]>) -> Info<'a> {
|
||||
Info {
|
||||
color_type: ColorType::Indexed,
|
||||
bit_depth: BitDepth::from_u8(src_bit_depth).unwrap(),
|
||||
palette: Some(palette.into()),
|
||||
trns: trns.map(Into::into),
|
||||
..Info::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn expand_paletted(
|
||||
src: &[u8],
|
||||
src_bit_depth: u8,
|
||||
palette: &[u8],
|
||||
trns: Option<&[u8]>,
|
||||
) -> Vec<u8> {
|
||||
let info = create_info(src_bit_depth, palette, trns);
|
||||
let output_bytes_per_input_sample = match trns {
|
||||
None => 3,
|
||||
Some(_) => 4,
|
||||
};
|
||||
let samples_count_per_byte = (8 / src_bit_depth) as usize;
|
||||
let samples_count = src.len() * samples_count_per_byte;
|
||||
|
||||
let mut dst = vec![0; samples_count * output_bytes_per_input_sample];
|
||||
let transform_fn =
|
||||
super::super::create_transform_fn(&info, Transformations::EXPAND).unwrap();
|
||||
transform_fn(src, dst.as_mut_slice(), &info);
|
||||
|
||||
{
|
||||
// Compare the memoization-based calculations with the old, non-memoized code.
|
||||
let mut simple_dst = vec![0; samples_count * output_bytes_per_input_sample];
|
||||
if trns.is_none() {
|
||||
oracle_expand_paletted_into_rgb8(src, &mut simple_dst, &info)
|
||||
} else {
|
||||
oracle_expand_paletted_into_rgba8(src, &mut simple_dst, &info)
|
||||
}
|
||||
assert_eq!(&dst, &simple_dst);
|
||||
}
|
||||
|
||||
dst
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_expand_paletted_rgba_8bit() {
|
||||
let actual = expand_paletted(
|
||||
&[0, 1, 2, 3], // src
|
||||
8, // src_bit_depth
|
||||
&[
|
||||
// palette
|
||||
0, 1, 2, // entry #0
|
||||
4, 5, 6, // entry #1
|
||||
8, 9, 10, // entry #2
|
||||
12, 13, 14, // entry #3
|
||||
],
|
||||
Some(&[3, 7, 11, 15]), // trns
|
||||
);
|
||||
assert_eq!(actual, (0..16).collect::<Vec<u8>>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_expand_paletted_rgb_8bit() {
|
||||
let actual = expand_paletted(
|
||||
&[0, 1, 2, 3], // src
|
||||
8, // src_bit_depth
|
||||
&[
|
||||
// palette
|
||||
0, 1, 2, // entry #0
|
||||
3, 4, 5, // entry #1
|
||||
6, 7, 8, // entry #2
|
||||
9, 10, 11, // entry #3
|
||||
],
|
||||
None, // trns
|
||||
);
|
||||
assert_eq!(actual, (0..12).collect::<Vec<u8>>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_expand_paletted_rgba_4bit() {
|
||||
let actual = expand_paletted(
|
||||
&[0x01, 0x23], // src
|
||||
4, // src_bit_depth
|
||||
&[
|
||||
// palette
|
||||
0, 1, 2, // entry #0
|
||||
4, 5, 6, // entry #1
|
||||
8, 9, 10, // entry #2
|
||||
12, 13, 14, // entry #3
|
||||
],
|
||||
Some(&[3, 7, 11, 15]), // trns
|
||||
);
|
||||
assert_eq!(actual, (0..16).collect::<Vec<u8>>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_expand_paletted_rgb_4bit() {
|
||||
let actual = expand_paletted(
|
||||
&[0x01, 0x23], // src
|
||||
4, // src_bit_depth
|
||||
&[
|
||||
// palette
|
||||
0, 1, 2, // entry #0
|
||||
3, 4, 5, // entry #1
|
||||
6, 7, 8, // entry #2
|
||||
9, 10, 11, // entry #3
|
||||
],
|
||||
None, // trns
|
||||
);
|
||||
assert_eq!(actual, (0..12).collect::<Vec<u8>>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_expand_paletted_rgba_8bit_more_trns_entries_than_palette_entries() {
|
||||
let actual = expand_paletted(
|
||||
&[0, 1, 2, 3], // src
|
||||
8, // src_bit_depth
|
||||
&[
|
||||
// palette
|
||||
0, 1, 2, // entry #0
|
||||
4, 5, 6, // entry #1
|
||||
8, 9, 10, // entry #2
|
||||
12, 13, 14, // entry #3
|
||||
],
|
||||
Some(&[123; 5]), // trns
|
||||
);
|
||||
|
||||
// Invalid (too-long) `trns` means that we'll use 0xFF / opaque alpha everywhere.
|
||||
assert_eq!(
|
||||
actual,
|
||||
vec![0, 1, 2, 0xFF, 4, 5, 6, 0xFF, 8, 9, 10, 0xFF, 12, 13, 14, 0xFF],
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
fn test_expand_paletted_rgba_8bit_less_trns_entries_than_palette_entries() {
    let palette = [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14];
    // Only 2 alpha entries for 4 palette entries.
    let actual = expand_paletted(&[0, 1, 2, 3], 8, &palette, Some(&[3, 7]));

    // Too-short `trns` is treated differently from too-long - only missing entries are
    // replaced with 0xFF / opaque.
    assert_eq!(
        actual,
        vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0xFF, 12, 13, 14, 0xFF],
    );
}
|
||||
|
||||
#[test]
fn test_create_rgba_palette() {
    // Reference implementation: RGB channels come from `plte` (0 when missing),
    // alpha comes from `trns` (opaque / 0xFF when missing).
    fn create_expected_rgba_palette(plte: &[u8], trns: &[u8]) -> [[u8; 4]; 256] {
        let mut rgba = [[1, 2, 3, 4]; 256];
        for (i, entry) in rgba.iter_mut().enumerate() {
            for channel in 0..3 {
                entry[channel] = plte.get(i * 3 + channel).copied().unwrap_or(0);
            }
            entry[3] = trns.get(i).copied().unwrap_or(0xFF);
        }
        rgba
    }

    // Exercise every palette length 1..=32 combined with 0..=len alpha entries.
    for plte_len in 1..=32 {
        for trns_len in 0..=plte_len {
            let plte: Vec<u8> = (0..plte_len * 3).collect();
            let trns: Vec<u8> = (0..trns_len).map(|alpha| alpha + 200).collect();

            let info = create_info(8, &plte, Some(&trns));
            assert_eq!(
                super::create_rgba_palette(&info),
                create_expected_rgba_palette(&plte, &trns)
            );
        }
    }
}
|
||||
}
|
||||
230
third-party/vendor/png/src/decoder/zlib.rs
vendored
Normal file
230
third-party/vendor/png/src/decoder/zlib.rs
vendored
Normal file
|
|
@ -0,0 +1,230 @@
|
|||
use super::{stream::FormatErrorInner, DecodingError, CHUNCK_BUFFER_SIZE};
|
||||
|
||||
use fdeflate::Decompressor;
|
||||
|
||||
/// Ergonomics wrapper around `fdeflate::Decompressor` for zlib compressed data.
pub(super) struct ZlibStream {
    /// Current decoding state.
    state: Box<fdeflate::Decompressor>,
    /// If there has been a call to decompress already.
    started: bool,
    /// Remaining buffered decoded bytes.
    /// The decoder sometimes wants to inspect some already finished bytes for further decoding. So
    /// we keep a total of 32KB of decoded data available as long as more data may be appended.
    out_buffer: Vec<u8>,
    /// The first index of `out_buffer` where new data can be written.
    out_pos: usize,
    /// The first index of `out_buffer` that hasn't yet been passed to our client
    /// (i.e. not yet appended to the `image_data` parameter of `fn decompress` or `fn
    /// finish_compressed_chunks`).
    read_pos: usize,
    /// Limit on how many bytes can be decompressed in total. This field is mostly used for
    /// performance optimizations (e.g. to avoid allocating and zeroing out large buffers when only
    /// a small image is being decoded).
    max_total_output: usize,
    /// Ignore and do not calculate the Adler-32 checksum. Defaults to `true`.
    ///
    /// When set before the first `decompress` call, Adler-32 validation in the underlying
    /// decompressor is skipped (the equivalent of miniz's `TINFL_FLAG_COMPUTE_ADLER32` being
    /// unset).
    ///
    /// This flag should not be modified after decompression has started.
    ignore_adler32: bool,
}
|
||||
|
||||
impl ZlibStream {
|
||||
pub(crate) fn new() -> Self {
|
||||
ZlibStream {
|
||||
state: Box::new(Decompressor::new()),
|
||||
started: false,
|
||||
out_buffer: Vec::new(),
|
||||
out_pos: 0,
|
||||
read_pos: 0,
|
||||
max_total_output: usize::MAX,
|
||||
ignore_adler32: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn reset(&mut self) {
|
||||
self.started = false;
|
||||
self.out_buffer.clear();
|
||||
self.out_pos = 0;
|
||||
self.read_pos = 0;
|
||||
self.max_total_output = usize::MAX;
|
||||
*self.state = Decompressor::new();
|
||||
}
|
||||
|
||||
pub(crate) fn set_max_total_output(&mut self, n: usize) {
|
||||
self.max_total_output = n;
|
||||
}
|
||||
|
||||
/// Set the `ignore_adler32` flag and return `true` if the flag was
|
||||
/// successfully set.
|
||||
///
|
||||
/// The default is `true`.
|
||||
///
|
||||
/// This flag cannot be modified after decompression has started until the
|
||||
/// [ZlibStream] is reset.
|
||||
pub(crate) fn set_ignore_adler32(&mut self, flag: bool) -> bool {
|
||||
if !self.started {
|
||||
self.ignore_adler32 = flag;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the `ignore_adler32` flag.
|
||||
pub(crate) fn ignore_adler32(&self) -> bool {
|
||||
self.ignore_adler32
|
||||
}
|
||||
|
||||
/// Fill the decoded buffer as far as possible from `data`.
|
||||
/// On success returns the number of consumed input bytes.
|
||||
pub(crate) fn decompress(
|
||||
&mut self,
|
||||
data: &[u8],
|
||||
image_data: &mut Vec<u8>,
|
||||
) -> Result<usize, DecodingError> {
|
||||
// There may be more data past the adler32 checksum at the end of the deflate stream. We
|
||||
// match libpng's default behavior and ignore any trailing data. In the future we may want
|
||||
// to add a flag to control this behavior.
|
||||
if self.state.is_done() {
|
||||
return Ok(data.len());
|
||||
}
|
||||
|
||||
self.prepare_vec_for_appending();
|
||||
|
||||
if !self.started && self.ignore_adler32 {
|
||||
self.state.ignore_adler32();
|
||||
}
|
||||
|
||||
let (in_consumed, out_consumed) = self
|
||||
.state
|
||||
.read(data, self.out_buffer.as_mut_slice(), self.out_pos, false)
|
||||
.map_err(|err| {
|
||||
DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
|
||||
})?;
|
||||
|
||||
self.started = true;
|
||||
self.out_pos += out_consumed;
|
||||
self.transfer_finished_data(image_data);
|
||||
self.compact_out_buffer_if_needed();
|
||||
|
||||
Ok(in_consumed)
|
||||
}
|
||||
|
||||
/// Called after all consecutive IDAT chunks were handled.
|
||||
///
|
||||
/// The compressed stream can be split on arbitrary byte boundaries. This enables some cleanup
|
||||
/// within the decompressor and flushing additional data which may have been kept back in case
|
||||
/// more data were passed to it.
|
||||
pub(crate) fn finish_compressed_chunks(
|
||||
&mut self,
|
||||
image_data: &mut Vec<u8>,
|
||||
) -> Result<(), DecodingError> {
|
||||
if !self.started {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
while !self.state.is_done() {
|
||||
self.prepare_vec_for_appending();
|
||||
let (_in_consumed, out_consumed) = self
|
||||
.state
|
||||
.read(&[], self.out_buffer.as_mut_slice(), self.out_pos, true)
|
||||
.map_err(|err| {
|
||||
DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
|
||||
})?;
|
||||
|
||||
self.out_pos += out_consumed;
|
||||
|
||||
if !self.state.is_done() {
|
||||
let transferred = self.transfer_finished_data(image_data);
|
||||
assert!(
|
||||
transferred > 0 || out_consumed > 0,
|
||||
"No more forward progress made in stream decoding."
|
||||
);
|
||||
self.compact_out_buffer_if_needed();
|
||||
}
|
||||
}
|
||||
|
||||
self.transfer_finished_data(image_data);
|
||||
self.out_buffer.clear();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Resize the vector to allow allocation of more data.
|
||||
fn prepare_vec_for_appending(&mut self) {
|
||||
// The `debug_assert` below explains why we can use `>=` instead of `>` in the condition
|
||||
// that compares `self.out_post >= self.max_total_output` in the next `if` statement.
|
||||
debug_assert!(!self.state.is_done());
|
||||
if self.out_pos >= self.max_total_output {
|
||||
// This can happen when the `max_total_output` was miscalculated (e.g.
|
||||
// because the `IHDR` chunk was malformed and didn't match the `IDAT` chunk). In
|
||||
// this case, let's reset `self.max_total_output` before further calculations.
|
||||
self.max_total_output = usize::MAX;
|
||||
}
|
||||
|
||||
let current_len = self.out_buffer.len();
|
||||
let desired_len = self
|
||||
.out_pos
|
||||
.saturating_add(CHUNCK_BUFFER_SIZE)
|
||||
.min(self.max_total_output);
|
||||
if current_len >= desired_len {
|
||||
return;
|
||||
}
|
||||
|
||||
let buffered_len = self.decoding_size(self.out_buffer.len());
|
||||
debug_assert!(self.out_buffer.len() <= buffered_len);
|
||||
self.out_buffer.resize(buffered_len, 0u8);
|
||||
}
|
||||
|
||||
fn decoding_size(&self, len: usize) -> usize {
|
||||
// Allocate one more chunk size than currently or double the length while ensuring that the
|
||||
// allocation is valid and that any cursor within it will be valid.
|
||||
len
|
||||
// This keeps the buffer size a power-of-two, required by miniz_oxide.
|
||||
.saturating_add(CHUNCK_BUFFER_SIZE.max(len))
|
||||
// Ensure all buffer indices are valid cursor positions.
|
||||
// Note: both cut off and zero extension give correct results.
|
||||
.min(u64::max_value() as usize)
|
||||
// Ensure the allocation request is valid.
|
||||
// TODO: maximum allocation limits?
|
||||
.min(isize::max_value() as usize)
|
||||
// Don't unnecessarily allocate more than `max_total_output`.
|
||||
.min(self.max_total_output)
|
||||
}
|
||||
|
||||
fn transfer_finished_data(&mut self, image_data: &mut Vec<u8>) -> usize {
|
||||
let transferred = &self.out_buffer[self.read_pos..self.out_pos];
|
||||
image_data.extend_from_slice(transferred);
|
||||
self.read_pos = self.out_pos;
|
||||
transferred.len()
|
||||
}
|
||||
|
||||
fn compact_out_buffer_if_needed(&mut self) {
|
||||
// [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#10Compression) says that
|
||||
// "deflate/inflate compression with a sliding window (which is an upper bound on the
|
||||
// distances appearing in the deflate stream) of at most 32768 bytes".
|
||||
//
|
||||
// `fdeflate` requires that we keep this many most recently decompressed bytes in the
|
||||
// `out_buffer` - this allows referring back to them when handling "length and distance
|
||||
// codes" in the deflate stream).
|
||||
const LOOKBACK_SIZE: usize = 32768;
|
||||
|
||||
// Compact `self.out_buffer` when "needed". Doing this conditionally helps to put an upper
|
||||
// bound on the amortized cost of copying the data within `self.out_buffer`.
|
||||
//
|
||||
// TODO: The factor of 4 is an ad-hoc heuristic. Consider measuring and using a different
|
||||
// factor. (Early experiments seem to indicate that factor of 4 is faster than a factor of
|
||||
// 2 and 4 * `LOOKBACK_SIZE` seems like an acceptable memory trade-off. Higher factors
|
||||
// result in higher memory usage, but the compaction cost is lower - factor of 4 means
|
||||
// that 1 byte gets copied during compaction for 3 decompressed bytes.)
|
||||
if self.out_pos > LOOKBACK_SIZE * 4 {
|
||||
// Only preserve the `lookback_buffer` and "throw away" the earlier prefix.
|
||||
let lookback_buffer = self.out_pos.saturating_sub(LOOKBACK_SIZE)..self.out_pos;
|
||||
let preserved_len = lookback_buffer.len();
|
||||
self.out_buffer.copy_within(lookback_buffer, 0);
|
||||
self.read_pos = preserved_len;
|
||||
self.out_pos = preserved_len;
|
||||
}
|
||||
}
|
||||
}
|
||||
2413
third-party/vendor/png/src/encoder.rs
vendored
Normal file
2413
third-party/vendor/png/src/encoder.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
1073
third-party/vendor/png/src/filter.rs
vendored
Normal file
1073
third-party/vendor/png/src/filter.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
88
third-party/vendor/png/src/lib.rs
vendored
Normal file
88
third-party/vendor/png/src/lib.rs
vendored
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
//! # PNG encoder and decoder
|
||||
//!
|
||||
//! This crate contains a PNG encoder and decoder. It supports reading of single lines or whole frames.
|
||||
//!
|
||||
//! ## The decoder
|
||||
//!
|
||||
//! The most important types for decoding purposes are [`Decoder`](struct.Decoder.html) and
|
||||
//! [`Reader`](struct.Reader.html). They both wrap a `std::io::Read`.
|
||||
//! `Decoder` serves as a builder for `Reader`. Calling `Decoder::read_info` reads from the `Read` until the
|
||||
//! image data is reached.
|
||||
//!
|
||||
//! ### Using the decoder
|
||||
//! ```
|
||||
//! use std::fs::File;
|
||||
//! // The decoder is a builder for a `Reader` and can be used to set various decoding options
|
||||
//! // via `Transformations`. The default output transformation is `Transformations::IDENTITY`.
|
||||
//! let decoder = png::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
|
||||
//! let mut reader = decoder.read_info().unwrap();
|
||||
//! // Allocate the output buffer.
|
||||
//! let mut buf = vec![0; reader.output_buffer_size()];
|
||||
//! // Read the next frame. An APNG might contain multiple frames.
|
||||
//! let info = reader.next_frame(&mut buf).unwrap();
|
||||
//! // Grab the bytes of the image.
|
||||
//! let bytes = &buf[..info.buffer_size()];
|
||||
//! // Inspect more details of the last read frame.
|
||||
//! let in_animation = reader.info().frame_control.is_some();
|
||||
//! ```
|
||||
//!
|
||||
//! ## Encoder
|
||||
//! ### Using the encoder
|
||||
//!
|
||||
//! ```no_run
|
||||
//! // For reading and opening files
|
||||
//! use std::path::Path;
|
||||
//! use std::fs::File;
|
||||
//! use std::io::BufWriter;
|
||||
//!
|
||||
//! let path = Path::new(r"/path/to/image.png");
|
||||
//! let file = File::create(path).unwrap();
|
||||
//! let ref mut w = BufWriter::new(file);
|
||||
//!
|
||||
//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
|
||||
//! encoder.set_color(png::ColorType::Rgba);
|
||||
//! encoder.set_depth(png::BitDepth::Eight);
|
||||
//! encoder.set_source_gamma(png::ScaledFloat::from_scaled(45455)); // 1.0 / 2.2, scaled by 100000
|
||||
//! encoder.set_source_gamma(png::ScaledFloat::new(1.0 / 2.2)); // 1.0 / 2.2, unscaled, but rounded
|
||||
//! let source_chromaticities = png::SourceChromaticities::new( // Using unscaled instantiation here
|
||||
//! (0.31270, 0.32900),
|
||||
//! (0.64000, 0.33000),
|
||||
//! (0.30000, 0.60000),
|
||||
//! (0.15000, 0.06000)
|
||||
//! );
|
||||
//! encoder.set_source_chromaticities(source_chromaticities);
|
||||
//! let mut writer = encoder.write_header().unwrap();
|
||||
//!
|
||||
//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
|
||||
//! writer.write_image_data(&data).unwrap(); // Save
|
||||
//! ```
|
||||
//!
|
||||
|
||||
#![cfg_attr(feature = "unstable", feature(portable_simd))]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
#[macro_use]
|
||||
extern crate bitflags;
|
||||
|
||||
mod adam7;
|
||||
pub mod chunk;
|
||||
mod common;
|
||||
mod decoder;
|
||||
mod encoder;
|
||||
mod filter;
|
||||
mod srgb;
|
||||
pub mod text_metadata;
|
||||
mod traits;
|
||||
|
||||
pub use crate::common::*;
|
||||
pub use crate::decoder::{
|
||||
DecodeOptions, Decoded, Decoder, DecodingError, Limits, OutputInfo, Reader, StreamingDecoder,
|
||||
};
|
||||
pub use crate::encoder::{Encoder, EncodingError, StreamWriter, Writer};
|
||||
pub use crate::filter::{AdaptiveFilterType, FilterType};
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod test_utils;
|
||||
|
||||
#[cfg(feature = "benchmarks")]
|
||||
pub mod benchable_apis;
|
||||
30
third-party/vendor/png/src/srgb.rs
vendored
Normal file
30
third-party/vendor/png/src/srgb.rs
vendored
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
use crate::{ScaledFloat, SourceChromaticities};
|
||||
|
||||
/// Get the gamma that should be substituted for images conforming to the sRGB color space.
pub fn substitute_gamma() -> ScaledFloat {
    // Value taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB
    // (45455 is gamma 1/2.2 scaled by 100_000 - compare the `from_scaled` usage in the
    // crate-level docs).
    ScaledFloat::from_scaled(45455)
}
|
||||
|
||||
/// Get the chromaticities that should be substituted for images conforming to the sRGB color space.
pub fn substitute_chromaticities() -> SourceChromaticities {
    // Values taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB
    // Each pair is an (x, y) chromaticity coordinate scaled by 100_000.
    SourceChromaticities {
        white: (
            ScaledFloat::from_scaled(31270),
            ScaledFloat::from_scaled(32900),
        ),
        red: (
            ScaledFloat::from_scaled(64000),
            ScaledFloat::from_scaled(33000),
        ),
        green: (
            ScaledFloat::from_scaled(30000),
            ScaledFloat::from_scaled(60000),
        ),
        blue: (
            ScaledFloat::from_scaled(15000),
            ScaledFloat::from_scaled(6000),
        ),
    }
}
|
||||
118
third-party/vendor/png/src/test_utils.rs
vendored
Normal file
118
third-party/vendor/png/src/test_utils.rs
vendored
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
//! A set of test utilities.
|
||||
//!
|
||||
//! There is some overlap between this module and `src/encoder.rs` module, but:
|
||||
//!
|
||||
//! * This module (unlike `src/encoder.rs`) performs no validation of the data being written - this
|
||||
//! allows building testcases that use arbitrary, potentially invalid PNGs as input.
|
||||
//! * This module can be reused from `benches/decoder.rs` (a separate crate).
|
||||
|
||||
use byteorder::WriteBytesExt;
|
||||
use std::io::Write;
|
||||
|
||||
/// Generates a store-only, non-compressed image:
|
||||
///
|
||||
/// * `00` compression mode (i.e.`BTYPE` = `00` = no compression) is used
|
||||
/// * No filter is applied to the image rows
|
||||
///
|
||||
/// Currently the image always has the following properties:
|
||||
///
|
||||
/// * Single `IDAT` chunk
|
||||
/// * Zlib chunks of maximum possible size
|
||||
/// * 8-bit RGBA
|
||||
///
|
||||
/// These images are somewhat artificial, but may be useful for benchmarking performance of parts
|
||||
/// outside of `fdeflate` crate and/or the `unfilter` function (e.g. these images were originally
|
||||
/// used to evaluate changes to minimize copying of image pixels between various buffers - see
|
||||
/// [this
|
||||
/// discussion](https://github.com/image-rs/image-png/discussions/416#discussioncomment-7436871)
|
||||
/// for more details).
|
||||
#[allow(dead_code)] // Used from `benches/decoder.rs`
pub fn write_noncompressed_png(w: &mut impl Write, size: u32, idat_bytes: usize) {
    write_png_sig(w); // the fixed 8-byte PNG file signature
    write_rgba8_ihdr_with_width(w, size); // square RGBA8 image: width == height == `size`
    write_rgba8_idats(w, size, idat_bytes); // stored (uncompressed) zlib stream, split into IDATs
    write_iend(w); // terminating IEND chunk
}
|
||||
|
||||
/// Writes the fixed 8-byte PNG file signature.
/// See http://www.libpng.org/pub/png/spec/1.2/PNG-Structure.html#PNG-file-signature
pub fn write_png_sig(w: &mut impl Write) {
    w.write_all(&[137, 80, 78, 71, 13, 10, 26, 10]).unwrap();
}
|
||||
|
||||
/// Writes an arbitrary PNG chunk.
|
||||
pub fn write_chunk(w: &mut impl Write, chunk_type: &[u8], data: &[u8]) {
|
||||
assert_eq!(chunk_type.len(), 4);
|
||||
let crc = {
|
||||
let input = chunk_type
|
||||
.iter()
|
||||
.copied()
|
||||
.chain(data.iter().copied())
|
||||
.collect::<Vec<_>>();
|
||||
crc32fast::hash(input.as_slice())
|
||||
};
|
||||
w.write_u32::<byteorder::BigEndian>(data.len() as u32)
|
||||
.unwrap();
|
||||
w.write_all(chunk_type).unwrap();
|
||||
w.write_all(data).unwrap();
|
||||
w.write_u32::<byteorder::BigEndian>(crc).unwrap();
|
||||
}
|
||||
|
||||
/// Writes an IHDR chunk that indicates a non-interlaced RGBA8 that uses the same height and
|
||||
/// `width`. See http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IHDR
|
||||
pub fn write_rgba8_ihdr_with_width(w: &mut impl Write, width: u32) {
|
||||
let mut data = Vec::new();
|
||||
data.write_u32::<byteorder::BigEndian>(width).unwrap();
|
||||
data.write_u32::<byteorder::BigEndian>(width).unwrap(); // height
|
||||
data.write_u8(8).unwrap(); // bit depth = always 8-bits per channel
|
||||
data.write_u8(6).unwrap(); // color type = color + alpha
|
||||
data.write_u8(0).unwrap(); // compression method (0 is the only allowed value)
|
||||
data.write_u8(0).unwrap(); // filter method (0 is the only allowed value)
|
||||
data.write_u8(0).unwrap(); // interlace method = no interlacing
|
||||
write_chunk(w, b"IHDR", &data);
|
||||
}
|
||||
|
||||
/// Generates RGBA8 `width` x `height` image and wraps it in a store-only zlib container.
|
||||
pub fn generate_rgba8_with_width_and_height(width: u32, height: u32) -> Vec<u8> {
|
||||
// Generate arbitrary test pixels.
|
||||
let image_pixels = {
|
||||
let mut row = Vec::new();
|
||||
row.write_u8(0).unwrap(); // filter = no filter
|
||||
|
||||
let row_pixels = (0..width).flat_map(|i| {
|
||||
let color: u8 = (i * 255 / width) as u8;
|
||||
let alpha: u8 = 0xff;
|
||||
[color, 255 - color, color / 2, alpha]
|
||||
});
|
||||
row.extend(row_pixels);
|
||||
|
||||
std::iter::repeat(row)
|
||||
.take(height as usize)
|
||||
.flatten()
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let mut zlib_data = Vec::new();
|
||||
let mut store_only_compressor =
|
||||
fdeflate::StoredOnlyCompressor::new(std::io::Cursor::new(&mut zlib_data)).unwrap();
|
||||
store_only_compressor.write_data(&image_pixels).unwrap();
|
||||
store_only_compressor.finish().unwrap();
|
||||
|
||||
zlib_data
|
||||
}
|
||||
|
||||
/// Writes an IDAT chunk.
|
||||
pub fn write_rgba8_idats(w: &mut impl Write, size: u32, idat_bytes: usize) {
|
||||
let data = generate_rgba8_with_width_and_height(size, size);
|
||||
|
||||
for chunk in data.chunks(idat_bytes) {
|
||||
write_chunk(w, b"IDAT", chunk);
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes an IEND chunk.
/// See http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IEND
pub fn write_iend(w: &mut impl Write) {
    // IEND carries no payload; it just terminates the PNG datastream.
    write_chunk(w, b"IEND", &[]);
}
|
||||
586
third-party/vendor/png/src/text_metadata.rs
vendored
Normal file
586
third-party/vendor/png/src/text_metadata.rs
vendored
Normal file
|
|
@ -0,0 +1,586 @@
|
|||
//! # Text chunks (tEXt/zTXt/iTXt) structs and functions
|
||||
//!
|
||||
//! The [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#11textinfo) optionally allows for
|
||||
//! embedded text chunks in the file. They may appear either before or after the image data
|
||||
//! chunks. There are three kinds of text chunks.
|
||||
//! - `tEXt`: This has a `keyword` and `text` field, and is ISO 8859-1 encoded.
|
||||
//! - `zTXt`: This is semantically the same as `tEXt`, i.e. it has the same fields and
|
||||
//! encoding, but the `text` field is compressed before being written into the PNG file.
|
||||
//! - `iTXt`: This chunk allows for its `text` field to be any valid UTF-8, and supports
|
||||
//! compression of the text field as well.
|
||||
//!
|
||||
//! The `ISO 8859-1` encoding technically doesn't allow any control characters
|
||||
//! to be used, but in practice these values are encountered anyway. This can
|
||||
//! either be the extended `ISO-8859-1` encoding with control characters or the
|
||||
//! `Windows-1252` encoding. This crate assumes the `ISO-8859-1` encoding is
|
||||
//! used.
|
||||
//!
|
||||
//! ## Reading text chunks
|
||||
//!
|
||||
//! As a PNG is decoded, any text chunk encountered is appended the
|
||||
//! [`Info`](`crate::common::Info`) struct, in the `uncompressed_latin1_text`,
|
||||
//! `compressed_latin1_text`, and the `utf8_text` fields depending on whether the encountered
|
||||
//! chunk is `tEXt`, `zTXt`, or `iTXt`.
|
||||
//!
|
||||
//! ```
|
||||
//! use std::fs::File;
|
||||
//! use std::iter::FromIterator;
|
||||
//! use std::path::PathBuf;
|
||||
//!
|
||||
//! // Opening a png file that has a zTXt chunk
|
||||
//! let decoder = png::Decoder::new(
|
||||
//! File::open(PathBuf::from_iter([
|
||||
//! "tests",
|
||||
//! "text_chunk_examples",
|
||||
//! "ztxt_example.png",
|
||||
//! ]))
|
||||
//! .unwrap(),
|
||||
//! );
|
||||
//! let mut reader = decoder.read_info().unwrap();
|
||||
//! // If the text chunk is before the image data frames, `reader.info()` already contains the text.
|
||||
//! for text_chunk in &reader.info().compressed_latin1_text {
|
||||
//! println!("{:?}", text_chunk.keyword); // Prints the keyword
|
||||
//! println!("{:#?}", text_chunk); // Prints out the text chunk.
|
||||
//! // To get the uncompressed text, use the `get_text` method.
|
||||
//! println!("{}", text_chunk.get_text().unwrap());
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! ## Writing text chunks
|
||||
//!
|
||||
//! There are two ways to write text chunks: the first is to add the appropriate text structs directly to the encoder header before the header is written to file.
|
||||
//! The second is to use the `write_text_chunk` method, which can add a text chunk at any point in the stream.
|
||||
//!
|
||||
//! ```
|
||||
//! # use png::text_metadata::{ITXtChunk, ZTXtChunk};
|
||||
//! # use std::env;
|
||||
//! # use std::fs::File;
|
||||
//! # use std::io::BufWriter;
|
||||
//! # use std::iter::FromIterator;
|
||||
//! # use std::path::PathBuf;
|
||||
//! # let file = File::create(PathBuf::from_iter(["target", "text_chunk.png"])).unwrap();
|
||||
//! # let ref mut w = BufWriter::new(file);
|
||||
//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
|
||||
//! encoder.set_color(png::ColorType::Rgba);
|
||||
//! encoder.set_depth(png::BitDepth::Eight);
|
||||
//! // Adding text chunks to the header
|
||||
//! encoder
|
||||
//! .add_text_chunk(
|
||||
//! "Testing tEXt".to_string(),
|
||||
//! "This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
|
||||
//! )
|
||||
//! .unwrap();
|
||||
//! encoder
|
||||
//! .add_ztxt_chunk(
|
||||
//! "Testing zTXt".to_string(),
|
||||
//! "This is a zTXt chunk that is compressed in the png file.".to_string(),
|
||||
//! )
|
||||
//! .unwrap();
|
||||
//! encoder
|
||||
//! .add_itxt_chunk(
|
||||
//! "Testing iTXt".to_string(),
|
||||
//! "iTXt chunks support all of UTF8. Example: हिंदी.".to_string(),
|
||||
//! )
|
||||
//! .unwrap();
|
||||
//!
|
||||
//! let mut writer = encoder.write_header().unwrap();
|
||||
//!
|
||||
//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
|
||||
//! writer.write_image_data(&data).unwrap(); // Save
|
||||
//!
|
||||
//! // We can add a tEXt/zTXt/iTXt at any point before the encoder is dropped from scope. These chunks will be at the end of the png file.
|
||||
//! let tail_ztxt_chunk = ZTXtChunk::new("Comment".to_string(), "A zTXt chunk after the image data.".to_string());
|
||||
//! writer.write_text_chunk(&tail_ztxt_chunk).unwrap();
|
||||
//!
|
||||
//! // The fields of the text chunk are public, so they can be mutated before being written to the file.
|
||||
//! let mut tail_itxt_chunk = ITXtChunk::new("Author".to_string(), "सायंतन खान".to_string());
|
||||
//! tail_itxt_chunk.compressed = true;
|
||||
//! tail_itxt_chunk.language_tag = "hi".to_string();
|
||||
//! tail_itxt_chunk.translated_keyword = "लेखक".to_string();
|
||||
//! writer.write_text_chunk(&tail_itxt_chunk).unwrap();
|
||||
//! ```
|
||||
|
||||
#![warn(missing_docs)]
|
||||
|
||||
use crate::{chunk, encoder, DecodingError, EncodingError};
|
||||
use fdeflate::BoundedDecompressionError;
|
||||
use flate2::write::ZlibEncoder;
|
||||
use flate2::Compression;
|
||||
use std::{convert::TryFrom, io::Write};
|
||||
|
||||
/// Default decompression limit for compressed text chunks.
///
/// Used by [`ZTXtChunk::decompress_text`] as the default cap on decompressed output size.
pub const DECOMPRESSION_LIMIT: usize = 2097152; // 2 MiB
|
||||
|
||||
/// Text encoding errors that are wrapped by the standard EncodingError type
#[derive(Debug, Clone, Copy)]
pub(crate) enum TextEncodingError {
    /// Unrepresentable characters in string
    Unrepresentable,
    /// Keyword longer than 79 bytes or empty
    InvalidKeywordSize,
    /// Error encountered while compressing text
    CompressionError,
}
|
||||
|
||||
/// Text decoding errors that are wrapped by the standard DecodingError type
#[derive(Debug, Clone, Copy)]
pub(crate) enum TextDecodingError {
    /// Unrepresentable characters in string
    Unrepresentable,
    /// Keyword longer than 79 bytes or empty
    InvalidKeywordSize,
    /// Missing null separator
    MissingNullSeparator,
    /// Compressed text cannot be uncompressed
    InflationError,
    /// Needs more space to decompress
    OutOfDecompressionSpace,
    /// Using an unspecified value for the compression method
    InvalidCompressionMethod,
    /// Using a byte that is not 0 or 255 as compression flag in iTXt chunk
    InvalidCompressionFlag,
    /// Missing the compression flag
    MissingCompressionFlag,
}
|
||||
|
||||
/// A generalized text chunk trait
pub trait EncodableTextChunk {
    /// Serialize the text chunk as a complete PNG chunk into a `Write`
    fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError>;
}
|
||||
|
||||
/// Struct representing a tEXt chunk
///
/// Both fields are Latin-1 (ISO 8859-1) encoded when written to a PNG file.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TEXtChunk {
    /// Keyword field of the tEXt chunk. Needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Text field of tEXt chunk. Can be at most 2GB.
    pub text: String,
}
|
||||
|
||||
/// Decodes Latin-1 bytes to a `String`; every byte value maps directly to the
/// Unicode code point with the same value, so this can never fail.
fn decode_iso_8859_1(bytes: &[u8]) -> String {
    let mut out = String::with_capacity(bytes.len());
    for &b in bytes {
        out.push(char::from(b));
    }
    out
}
|
||||
|
||||
/// Encodes `text` as ISO 8859-1 (Latin-1) bytes, failing on the first char
/// outside U+0000..=U+00FF.
fn encode_iso_8859_1(text: &str) -> Result<Vec<u8>, TextEncodingError> {
    encode_iso_8859_1_iter(text).collect()
}
|
||||
|
||||
fn encode_iso_8859_1_into(buf: &mut Vec<u8>, text: &str) -> Result<(), TextEncodingError> {
|
||||
for b in encode_iso_8859_1_iter(text) {
|
||||
buf.push(b?);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn encode_iso_8859_1_iter(text: &str) -> impl Iterator<Item = Result<u8, TextEncodingError>> + '_ {
|
||||
text.chars()
|
||||
.map(|c| u8::try_from(c as u32).map_err(|_| TextEncodingError::Unrepresentable))
|
||||
}
|
||||
|
||||
fn decode_ascii(text: &[u8]) -> Result<&str, TextDecodingError> {
|
||||
if text.is_ascii() {
|
||||
// `from_utf8` cannot panic because we're already checked that `text` is ASCII-7.
|
||||
// And this is the only safe way to get ASCII-7 string from `&[u8]`.
|
||||
Ok(std::str::from_utf8(text).expect("unreachable"))
|
||||
} else {
|
||||
Err(TextDecodingError::Unrepresentable)
|
||||
}
|
||||
}
|
||||
|
||||
impl TEXtChunk {
|
||||
/// Constructs a new TEXtChunk.
|
||||
/// Not sure whether it should take &str or String.
|
||||
pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
|
||||
Self {
|
||||
keyword: keyword.into(),
|
||||
text: text.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Decodes a slice of bytes to a String using Latin-1 decoding.
|
||||
/// The decoder runs in strict mode, and any decoding errors are passed along to the caller.
|
||||
pub(crate) fn decode(
|
||||
keyword_slice: &[u8],
|
||||
text_slice: &[u8],
|
||||
) -> Result<Self, TextDecodingError> {
|
||||
if keyword_slice.is_empty() || keyword_slice.len() > 79 {
|
||||
return Err(TextDecodingError::InvalidKeywordSize);
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
keyword: decode_iso_8859_1(keyword_slice),
|
||||
text: decode_iso_8859_1(text_slice),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl EncodableTextChunk for TEXtChunk {
|
||||
/// Encodes TEXtChunk to a Writer. The keyword and text are separated by a byte of zeroes.
|
||||
fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
|
||||
let mut data = encode_iso_8859_1(&self.keyword)?;
|
||||
|
||||
if data.is_empty() || data.len() > 79 {
|
||||
return Err(TextEncodingError::InvalidKeywordSize.into());
|
||||
}
|
||||
|
||||
data.push(0);
|
||||
|
||||
encode_iso_8859_1_into(&mut data, &self.text)?;
|
||||
|
||||
encoder::write_chunk(w, chunk::tEXt, &data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Struct representing a zTXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ZTXtChunk {
    /// Keyword field of the zTXt chunk. Needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Text field of zTXt chunk. It is compressed by default, but can be uncompressed if necessary.
    text: OptCompressed,
}
|
||||
|
||||
/// Private enum encoding the compressed and uncompressed states of zTXt/iTXt text field.
#[derive(Clone, Debug, PartialEq, Eq)]
enum OptCompressed {
    /// Compressed (zlib stream) version of the text field. Can be at most 2GB.
    Compressed(Vec<u8>),
    /// Uncompressed text field.
    Uncompressed(String),
}
|
||||
|
||||
impl ZTXtChunk {
|
||||
/// Creates a new ZTXt chunk.
|
||||
pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
|
||||
Self {
|
||||
keyword: keyword.into(),
|
||||
text: OptCompressed::Uncompressed(text.into()),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn decode(
|
||||
keyword_slice: &[u8],
|
||||
compression_method: u8,
|
||||
text_slice: &[u8],
|
||||
) -> Result<Self, TextDecodingError> {
|
||||
if keyword_slice.is_empty() || keyword_slice.len() > 79 {
|
||||
return Err(TextDecodingError::InvalidKeywordSize);
|
||||
}
|
||||
|
||||
if compression_method != 0 {
|
||||
return Err(TextDecodingError::InvalidCompressionMethod);
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
keyword: decode_iso_8859_1(keyword_slice),
|
||||
text: OptCompressed::Compressed(text_slice.to_vec()),
|
||||
})
|
||||
}
|
||||
|
||||
/// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes.
|
||||
pub fn decompress_text(&mut self) -> Result<(), DecodingError> {
|
||||
self.decompress_text_with_limit(DECOMPRESSION_LIMIT)
|
||||
}
|
||||
|
||||
/// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes.
|
||||
pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> {
|
||||
match &self.text {
|
||||
OptCompressed::Compressed(v) => {
|
||||
let uncompressed_raw = match fdeflate::decompress_to_vec_bounded(&v[..], limit) {
|
||||
Ok(s) => s,
|
||||
Err(BoundedDecompressionError::OutputTooLarge { .. }) => {
|
||||
return Err(DecodingError::from(
|
||||
TextDecodingError::OutOfDecompressionSpace,
|
||||
));
|
||||
}
|
||||
Err(_) => {
|
||||
return Err(DecodingError::from(TextDecodingError::InflationError));
|
||||
}
|
||||
};
|
||||
self.text = OptCompressed::Uncompressed(decode_iso_8859_1(&uncompressed_raw));
|
||||
}
|
||||
OptCompressed::Uncompressed(_) => {}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Decompresses the inner text, and returns it as a `String`.
|
||||
/// If decompression uses more the 2MiB, first call decompress with limit, and then this method.
|
||||
pub fn get_text(&self) -> Result<String, DecodingError> {
|
||||
match &self.text {
|
||||
OptCompressed::Compressed(v) => {
|
||||
let uncompressed_raw = fdeflate::decompress_to_vec(v)
|
||||
.map_err(|_| DecodingError::from(TextDecodingError::InflationError))?;
|
||||
Ok(decode_iso_8859_1(&uncompressed_raw))
|
||||
}
|
||||
OptCompressed::Uncompressed(s) => Ok(s.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Compresses the inner text, mutating its own state.
|
||||
pub fn compress_text(&mut self) -> Result<(), EncodingError> {
|
||||
match &self.text {
|
||||
OptCompressed::Uncompressed(s) => {
|
||||
let uncompressed_raw = encode_iso_8859_1(s)?;
|
||||
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast());
|
||||
encoder
|
||||
.write_all(&uncompressed_raw)
|
||||
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
|
||||
self.text = OptCompressed::Compressed(
|
||||
encoder
|
||||
.finish()
|
||||
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?,
|
||||
);
|
||||
}
|
||||
OptCompressed::Compressed(_) => {}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl EncodableTextChunk for ZTXtChunk {
|
||||
fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
|
||||
let mut data = encode_iso_8859_1(&self.keyword)?;
|
||||
|
||||
if data.is_empty() || data.len() > 79 {
|
||||
return Err(TextEncodingError::InvalidKeywordSize.into());
|
||||
}
|
||||
|
||||
// Null separator
|
||||
data.push(0);
|
||||
|
||||
// Compression method: the only valid value is 0, as of 2021.
|
||||
data.push(0);
|
||||
|
||||
match &self.text {
|
||||
OptCompressed::Compressed(v) => {
|
||||
data.extend_from_slice(&v[..]);
|
||||
}
|
||||
OptCompressed::Uncompressed(s) => {
|
||||
// This code may have a bug. Check for correctness.
|
||||
let uncompressed_raw = encode_iso_8859_1(s)?;
|
||||
let mut encoder = ZlibEncoder::new(data, Compression::fast());
|
||||
encoder
|
||||
.write_all(&uncompressed_raw)
|
||||
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
|
||||
data = encoder
|
||||
.finish()
|
||||
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
|
||||
}
|
||||
};
|
||||
|
||||
encoder::write_chunk(w, chunk::zTXt, &data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Struct encoding an iTXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ITXtChunk {
    /// The keyword field. This needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Indicates whether the text will be (or was) compressed in the PNG.
    pub compressed: bool,
    /// A hyphen separated list of languages that the keyword is translated to. This is ASCII-7 encoded.
    pub language_tag: String,
    /// Translated keyword. This is UTF-8 encoded.
    pub translated_keyword: String,
    /// Text field of the iTXt chunk. May be stored compressed or uncompressed;
    /// once uncompressed it is always valid UTF-8.
    text: OptCompressed,
}
|
||||
|
||||
impl ITXtChunk {
    /// Constructs a new iTXt chunk. Leaves all but keyword and text to default values
    /// (not compressed, empty language tag, empty translated keyword).
    pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
        Self {
            keyword: keyword.into(),
            compressed: false,
            language_tag: "".to_string(),
            translated_keyword: "".to_string(),
            text: OptCompressed::Uncompressed(text.into()),
        }
    }

    /// Decodes the raw fields of an iTXt chunk.
    ///
    /// Validation performed, in order:
    /// * keyword must be 1-79 bytes (decoded as Latin-1),
    /// * compression flag must be 0 (uncompressed) or 1 (compressed),
    /// * compression method must be 0 when the flag says compressed,
    /// * language tag must be ASCII, translated keyword must be UTF-8.
    ///
    /// Compressed text is stored as-is and only inflated on demand;
    /// uncompressed text must already be valid UTF-8.
    pub(crate) fn decode(
        keyword_slice: &[u8],
        compression_flag: u8,
        compression_method: u8,
        language_tag_slice: &[u8],
        translated_keyword_slice: &[u8],
        text_slice: &[u8],
    ) -> Result<Self, TextDecodingError> {
        if keyword_slice.is_empty() || keyword_slice.len() > 79 {
            return Err(TextDecodingError::InvalidKeywordSize);
        }
        let keyword = decode_iso_8859_1(keyword_slice);

        let compressed = match compression_flag {
            0 => false,
            1 => true,
            _ => return Err(TextDecodingError::InvalidCompressionFlag),
        };

        // The method byte is only validated when the compression flag is set.
        if compressed && compression_method != 0 {
            return Err(TextDecodingError::InvalidCompressionMethod);
        }

        let language_tag = decode_ascii(language_tag_slice)?.to_owned();

        let translated_keyword = std::str::from_utf8(translated_keyword_slice)
            .map_err(|_| TextDecodingError::Unrepresentable)?
            .to_string();
        let text = if compressed {
            OptCompressed::Compressed(text_slice.to_vec())
        } else {
            OptCompressed::Uncompressed(
                String::from_utf8(text_slice.to_vec())
                    .map_err(|_| TextDecodingError::Unrepresentable)?,
            )
        };

        Ok(Self {
            keyword,
            compressed,
            language_tag,
            translated_keyword,
            text,
        })
    }

    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes.
    pub fn decompress_text(&mut self) -> Result<(), DecodingError> {
        self.decompress_text_with_limit(DECOMPRESSION_LIMIT)
    }

    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes.
    /// A no-op when the text is already uncompressed.
    pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                let uncompressed_raw = match fdeflate::decompress_to_vec_bounded(v, limit) {
                    Ok(s) => s,
                    // Inflated output would exceed `limit` bytes.
                    Err(BoundedDecompressionError::OutputTooLarge { .. }) => {
                        return Err(DecodingError::from(
                            TextDecodingError::OutOfDecompressionSpace,
                        ));
                    }
                    // Malformed deflate stream.
                    Err(_) => {
                        return Err(DecodingError::from(TextDecodingError::InflationError));
                    }
                };
                // Unlike zTXt, iTXt text is UTF-8, so invalid bytes are an error
                // rather than being decoded as Latin-1.
                self.text = OptCompressed::Uncompressed(
                    String::from_utf8(uncompressed_raw)
                        .map_err(|_| TextDecodingError::Unrepresentable)?,
                );
            }
            OptCompressed::Uncompressed(_) => {}
        };
        Ok(())
    }

    /// Decompresses the inner text, and returns it as a `String`.
    /// Note that decompression here is unbounded; to enforce a size limit,
    /// call `decompress_text_with_limit` first and then this method.
    pub fn get_text(&self) -> Result<String, DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                let uncompressed_raw = fdeflate::decompress_to_vec(v)
                    .map_err(|_| DecodingError::from(TextDecodingError::InflationError))?;
                String::from_utf8(uncompressed_raw)
                    .map_err(|_| TextDecodingError::Unrepresentable.into())
            }
            OptCompressed::Uncompressed(s) => Ok(s.clone()),
        }
    }

    /// Compresses the inner text, mutating its own state.
    /// A no-op when the text is already compressed.
    pub fn compress_text(&mut self) -> Result<(), EncodingError> {
        match &self.text {
            OptCompressed::Uncompressed(s) => {
                // iTXt text is UTF-8, so the string bytes are compressed directly
                // (no Latin-1 re-encoding step as in zTXt).
                let uncompressed_raw = s.as_bytes();
                let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast());
                encoder
                    .write_all(uncompressed_raw)
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                self.text = OptCompressed::Compressed(
                    encoder
                        .finish()
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?,
                );
            }
            OptCompressed::Compressed(_) => {}
        }

        Ok(())
    }
}
|
||||
|
||||
impl EncodableTextChunk for ITXtChunk {
|
||||
fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
|
||||
// Keyword
|
||||
let mut data = encode_iso_8859_1(&self.keyword)?;
|
||||
|
||||
if data.is_empty() || data.len() > 79 {
|
||||
return Err(TextEncodingError::InvalidKeywordSize.into());
|
||||
}
|
||||
|
||||
// Null separator
|
||||
data.push(0);
|
||||
|
||||
// Compression flag
|
||||
if self.compressed {
|
||||
data.push(1);
|
||||
} else {
|
||||
data.push(0);
|
||||
}
|
||||
|
||||
// Compression method
|
||||
data.push(0);
|
||||
|
||||
// Language tag
|
||||
if !self.language_tag.is_ascii() {
|
||||
return Err(EncodingError::from(TextEncodingError::Unrepresentable));
|
||||
}
|
||||
data.extend(self.language_tag.as_bytes());
|
||||
|
||||
// Null separator
|
||||
data.push(0);
|
||||
|
||||
// Translated keyword
|
||||
data.extend_from_slice(self.translated_keyword.as_bytes());
|
||||
|
||||
// Null separator
|
||||
data.push(0);
|
||||
|
||||
// Text
|
||||
if self.compressed {
|
||||
match &self.text {
|
||||
OptCompressed::Compressed(v) => {
|
||||
data.extend_from_slice(&v[..]);
|
||||
}
|
||||
OptCompressed::Uncompressed(s) => {
|
||||
let uncompressed_raw = s.as_bytes();
|
||||
let mut encoder = ZlibEncoder::new(data, Compression::fast());
|
||||
encoder
|
||||
.write_all(uncompressed_raw)
|
||||
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
|
||||
data = encoder
|
||||
.finish()
|
||||
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match &self.text {
|
||||
OptCompressed::Compressed(v) => {
|
||||
let uncompressed_raw = fdeflate::decompress_to_vec(v)
|
||||
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
|
||||
data.extend_from_slice(&uncompressed_raw[..]);
|
||||
}
|
||||
OptCompressed::Uncompressed(s) => {
|
||||
data.extend_from_slice(s.as_bytes());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
encoder::write_chunk(w, chunk::iTXt, &data)
|
||||
}
|
||||
}
|
||||
43
third-party/vendor/png/src/traits.rs
vendored
Normal file
43
third-party/vendor/png/src/traits.rs
vendored
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
use std::io;
|
||||
|
||||
macro_rules! read_bytes_ext {
|
||||
($output_type:ty) => {
|
||||
impl<W: io::Read + ?Sized> ReadBytesExt<$output_type> for W {
|
||||
#[inline]
|
||||
fn read_be(&mut self) -> io::Result<$output_type> {
|
||||
let mut bytes = [0u8; std::mem::size_of::<$output_type>()];
|
||||
self.read_exact(&mut bytes)?;
|
||||
Ok(<$output_type>::from_be_bytes(bytes))
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Generates a blanket `WriteBytesExt<$t>` impl for every `io::Write` type,
/// encoding the value as big-endian bytes.
macro_rules! write_bytes_ext {
    ($t:ty) => {
        impl<W: io::Write + ?Sized> WriteBytesExt<$t> for W {
            #[inline]
            fn write_be(&mut self, value: $t) -> io::Result<()> {
                self.write_all(&value.to_be_bytes())
            }
        }
    };
}
|
||||
|
||||
/// Read extension to read big endian data.
///
/// Implemented for every `io::Read` type via the `read_bytes_ext!` macro.
pub trait ReadBytesExt<T>: io::Read {
    /// Read `T` from a bytes stream. Most significant byte first.
    fn read_be(&mut self) -> io::Result<T>;
}
|
||||
|
||||
/// Write extension to write big endian data.
///
/// Implemented for every `io::Write` type via the `write_bytes_ext!` macro.
pub trait WriteBytesExt<T>: io::Write {
    /// Writes `n` to a bytes stream. Most significant byte first.
    //
    // The parameter was previously declared as `_`, which renders
    // meaninglessly in rustdoc; naming it is backward compatible because
    // Rust call sites never reference parameter names.
    fn write_be(&mut self, n: T) -> io::Result<()>;
}
|
||||
|
||||
// Big-endian readers for the integer widths used when parsing PNG data.
read_bytes_ext!(u8);
read_bytes_ext!(u16);
read_bytes_ext!(u32);

// Big-endian writer; only `u32` is instantiated on the write side.
write_bytes_ext!(u32);
|
||||
Loading…
Add table
Add a link
Reference in a new issue