Vendor things
This commit is contained in:
parent
5deceec006
commit
977e3c17e5
19434 changed files with 10682014 additions and 0 deletions
1
third-party/vendor/indexmap/.cargo-checksum.json
vendored
Normal file
1
third-party/vendor/indexmap/.cargo-checksum.json
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"5e4e161e85d5f1beb969e59267753ecd65383d5d6ab9ceb702d1a92234641373","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ecc269ef87fd38a1d98e30bfac9ba964a9dbd9315c3770fed98d4d7cb5882055","README.md":"98781168dcaf64ed3eecbf7146311045433773486259a372b985c9ae28545017","RELEASES.md":"4fad7ac1991f2f145957a92eb4c548beda66d80ae8492744227a9a7d00f55454","benches/bench.rs":"3b2900abbc9e8a60af78b0395222ee75e86bc68519a0f38477387d1572eed397","benches/faststring.rs":"5fdd6cdb19d0557ed58f241e809a240cf8939d9e5b87a72d5f127f81ab98380b","src/arbitrary.rs":"068713b1e8e762dbe9e4d19d555e77c17e59408335a40f4777d6100340605655","src/borsh.rs":"8c93ca6ab61af017c5ed52d9d4a1bc0444314db3b8edbb7d21897ae87e25ae99","src/lib.rs":"3be42cdd6ced7e7efbd27053f2e7802152e08ae1f2dc6b2019740806486c711a","src/macros.rs":"1493afee2db2c9744be99e5fc32d0cd3742b8a3dbb31fa786ebbe28f1387e7d6","src/map.rs":"31cbd0fcb14a1bc09566176eb02c186d49a0ed69afde12bd7fb0464bf0de92e9","src/map/core.rs":"eb291748598e1aa9112ee068a9a5fea7b1190114c4007de179528408f7439faa","src/map/core/entry.rs":"7d987cc5099099e0c9020afa7b3c23d20f11812dce9d684db03ff390f4068691","src/map/core/raw.rs":"30cc048f7c7f0deb2eb1c699a15e6b18dff1187404228c55e404c0d5bfe11f3e","src/map/core/raw_entry_v1.rs":"0a6aff175c8e92139af92ae629b6657e2f671d559a6f0c6529b92f35dcff4056","src/map/iter.rs":"ae0023c1ccc78ac765dc43fe51f233f2fca1ebba132490db8429941183d2c497","src/map/serde_seq.rs":"ce06b5bb816c15ea8fe3d2c358baa90fe6f45ecb585623a69592d6de69319194","src/map/slice.rs":"40f063e78f0658b89cc3f6dfdedbd251c9cd00a2cb44be2404ce0020a7e8fac3","src/map/tests.rs":"714674a55103dc57c16d9fe2916d462a655dc217e079a7e0ed5a62ab0f9749f1","src/mutable_keys.rs":"312f70e0e595b88977eb77e43dd1a4f282566048fb0cadb5d13fd87835fd9e0b","src/rayon/map.rs":"0fad36851fdf6894695e526c684c9b3afeac82e29016e6a523eea68cc3b2d19d","src/rayon/mod.rs":"1c9c13b5cf6974f652ded53b014774944c761f079a77a61b3bc52eaa3d4b972b","src/rayon/set.rs":"4b076d
bfd9a7eb2fd65783f1c8a5acabe075403f3d05e30c337676acec25d8ee","src/rustc.rs":"fe7a348c5a10a66880cb6c737593fe79d3b6de40f44ba0d7b89204aa95e14a3a","src/serde.rs":"23fd6b5e8f6795e4121693ac16dab61e76c4d8c83e6da1b3ef26d081fae28e79","src/set.rs":"0dc56684c586fc93df85a3731edd9c9b6f3a7990feb61902f83b94466443c182","src/set/iter.rs":"a387c48eff0338b5c8f4e2059403bd665a9c8037634cc5860f80b72a4fbbed30","src/set/slice.rs":"f3101f971da512fa7d9da1fab3ae76489ed082bc0f94300246ed705cf597e6eb","src/set/tests.rs":"d9c182cd776ca9182b5284f06eacc70c5f33a66aff3021c35134ffd1d7630c05","src/util.rs":"dbd57cfdac2a72db8c5ce83bf288bcaf33b5ae59adddcd088792a624c4c0e909","tests/equivalent_trait.rs":"efe9393069e3cfc893d2c9c0343679979578e437fdb98a10baefeced027ba310","tests/macros_full_path.rs":"c33c86d7341581fdd08e2e6375a4afca507fa603540c54a3b9e51c4cd011cd71","tests/quick.rs":"9759dcc34d86d9635d9d18be6358f5f3e3c0f995874b64b5a7ca4b582f4acedb","tests/tests.rs":"f6dbeeb0e2950402b0e66ac52bf74c9e4197d3c5d9c0dde64a7998a2ef74d327"},"package":"7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4"}
|
||||
121
third-party/vendor/indexmap/Cargo.toml
vendored
Normal file
121
third-party/vendor/indexmap/Cargo.toml
vendored
Normal file
|
|
@ -0,0 +1,121 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2021"
|
||||
rust-version = "1.63"
|
||||
name = "indexmap"
|
||||
version = "2.2.5"
|
||||
description = "A hash table with consistent order and fast iteration."
|
||||
documentation = "https://docs.rs/indexmap/"
|
||||
readme = "README.md"
|
||||
keywords = [
|
||||
"hashmap",
|
||||
"no_std",
|
||||
]
|
||||
categories = [
|
||||
"data-structures",
|
||||
"no-std",
|
||||
]
|
||||
license = "Apache-2.0 OR MIT"
|
||||
repository = "https://github.com/indexmap-rs/indexmap"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
features = [
|
||||
"arbitrary",
|
||||
"quickcheck",
|
||||
"serde",
|
||||
"borsh",
|
||||
"rayon",
|
||||
]
|
||||
rustdoc-args = [
|
||||
"--cfg",
|
||||
"docsrs",
|
||||
]
|
||||
|
||||
[package.metadata.release]
|
||||
no-dev-version = true
|
||||
tag-name = "{{version}}"
|
||||
|
||||
[profile.bench]
|
||||
debug = 2
|
||||
|
||||
[lib]
|
||||
bench = false
|
||||
|
||||
[dependencies.arbitrary]
|
||||
version = "1.0"
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dependencies.borsh]
|
||||
version = "1.2"
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dependencies.equivalent]
|
||||
version = "1.0"
|
||||
default-features = false
|
||||
|
||||
[dependencies.hashbrown]
|
||||
version = "0.14.1"
|
||||
features = ["raw"]
|
||||
default-features = false
|
||||
|
||||
[dependencies.quickcheck]
|
||||
version = "1.0"
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dependencies.rayon]
|
||||
version = "1.5.3"
|
||||
optional = true
|
||||
|
||||
[dependencies.rustc-rayon]
|
||||
version = "0.5"
|
||||
optional = true
|
||||
package = "rustc-rayon"
|
||||
|
||||
[dependencies.serde]
|
||||
version = "1.0"
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dev-dependencies.fnv]
|
||||
version = "1.0"
|
||||
|
||||
[dev-dependencies.fxhash]
|
||||
version = "0.2.1"
|
||||
|
||||
[dev-dependencies.itertools]
|
||||
version = "0.12"
|
||||
|
||||
[dev-dependencies.lazy_static]
|
||||
version = "1.3"
|
||||
|
||||
[dev-dependencies.quickcheck]
|
||||
version = "1.0"
|
||||
default-features = false
|
||||
|
||||
[dev-dependencies.rand]
|
||||
version = "0.8"
|
||||
features = ["small_rng"]
|
||||
|
||||
[dev-dependencies.serde_derive]
|
||||
version = "1.0"
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = []
|
||||
test_debug = []
|
||||
|
||||
[lints.clippy]
|
||||
style = "allow"
|
||||
201
third-party/vendor/indexmap/LICENSE-APACHE
vendored
Normal file
201
third-party/vendor/indexmap/LICENSE-APACHE
vendored
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
25
third-party/vendor/indexmap/LICENSE-MIT
vendored
Normal file
25
third-party/vendor/indexmap/LICENSE-MIT
vendored
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2016--2017
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
57
third-party/vendor/indexmap/README.md
vendored
Normal file
57
third-party/vendor/indexmap/README.md
vendored
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
# indexmap
|
||||
|
||||
[](https://github.com/indexmap-rs/indexmap/actions)
|
||||
[](https://crates.io/crates/indexmap)
|
||||
[](https://docs.rs/indexmap)
|
||||
[](https://img.shields.io/badge/rust-1.63%2B-orange.svg)
|
||||
|
||||
A pure-Rust hash table which preserves (in a limited sense) insertion order.
|
||||
|
||||
This crate implements compact map and set data-structures,
|
||||
where the iteration order of the keys is independent from their hash or
|
||||
value. It preserves insertion order (except after removals), and it
|
||||
allows lookup of entries by either hash table key or numerical index.
|
||||
|
||||
Note: this crate was originally released under the name `ordermap`,
|
||||
but it was renamed to `indexmap` to better reflect its features.
|
||||
|
||||
# Background
|
||||
|
||||
This was inspired by Python 3.6's new dict implementation (which remembers
|
||||
the insertion order and is fast to iterate, and is compact in memory).
|
||||
|
||||
Some of those features were translated to Rust, and some were not. The result
|
||||
was indexmap, a hash table that has following properties:
|
||||
|
||||
- Order is **independent of hash function** and hash values of keys.
|
||||
- Fast to iterate.
|
||||
- Indexed in compact space.
|
||||
- Preserves insertion order **as long** as you don't call `.remove()`,
|
||||
`.swap_remove()`, or other methods that explicitly change order.
|
||||
The alternate `.shift_remove()` does preserve relative order.
|
||||
- Uses hashbrown for the inner table, just like Rust's libstd `HashMap` does.
|
||||
|
||||
## Performance
|
||||
|
||||
`IndexMap` derives a couple of performance facts directly from how it is constructed,
|
||||
which is roughly:
|
||||
|
||||
> A raw hash table of key-value indices, and a vector of key-value pairs.
|
||||
|
||||
- Iteration is very fast since it is on the dense key-values.
|
||||
- Removal is fast since it moves memory areas only in the table,
|
||||
and uses a single swap in the vector.
|
||||
- Lookup is fast-ish because the initial 7-bit hash lookup uses SIMD, and indices are
|
||||
densely stored. Lookup also is slow-ish since the actual key-value pairs are stored
|
||||
separately. (Visible when cpu caches size is limiting.)
|
||||
|
||||
- In practice, `IndexMap` has been tested out as the hashmap in rustc in [PR45282] and
|
||||
the performance was roughly on par across the whole workload.
|
||||
- If you want the properties of `IndexMap`, or its strongest performance points
|
||||
fits your workload, it might be the best hash table implementation.
|
||||
|
||||
[PR45282]: https://github.com/rust-lang/rust/pull/45282
|
||||
|
||||
# Recent Changes
|
||||
|
||||
See [RELEASES.md](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md).
|
||||
504
third-party/vendor/indexmap/RELEASES.md
vendored
Normal file
504
third-party/vendor/indexmap/RELEASES.md
vendored
Normal file
|
|
@ -0,0 +1,504 @@
|
|||
# Releases
|
||||
|
||||
## 2.2.5
|
||||
|
||||
- Added optional `borsh` serialization support.
|
||||
|
||||
## 2.2.4
|
||||
|
||||
- Added an `insert_sorted` method on `IndexMap`, `IndexSet`, and `VacantEntry`.
|
||||
- Avoid hashing for lookups in single-entry maps.
|
||||
- Limit preallocated memory in `serde` deserializers.
|
||||
|
||||
## 2.2.3
|
||||
|
||||
- Added `move_index` and `swap_indices` methods to `IndexedEntry`,
|
||||
`OccupiedEntry`, and `RawOccupiedEntryMut`, functioning like the existing
|
||||
methods on `IndexMap`.
|
||||
- Added `shift_insert` methods on `VacantEntry` and `RawVacantEntryMut`, as
|
||||
well as `shift_insert_hashed_nocheck` on the latter, to insert the new entry
|
||||
at a particular index.
|
||||
- Added `shift_insert` methods on `IndexMap` and `IndexSet` to insert a new
|
||||
entry at a particular index, or else move an existing entry there.
|
||||
|
||||
## 2.2.2
|
||||
|
||||
- Added indexing methods to raw entries: `RawEntryBuilder::from_hash_full`,
|
||||
`RawEntryBuilder::index_from_hash`, and `RawEntryMut::index`.
|
||||
|
||||
## 2.2.1
|
||||
|
||||
- Corrected the signature of `RawOccupiedEntryMut::into_key(self) -> &'a mut K`,
|
||||
This a breaking change from 2.2.0, but that version was published for less
|
||||
than a day and has now been yanked.
|
||||
|
||||
## 2.2.0
|
||||
|
||||
- The new `IndexMap::get_index_entry` method finds an entry by its index for
|
||||
in-place manipulation.
|
||||
|
||||
- The `Keys` iterator now implements `Index<usize>` for quick access to the
|
||||
entry's key, compared to indexing the map to get the value.
|
||||
|
||||
- The new `IndexMap::splice` and `IndexSet::splice` methods will drain the
|
||||
given range as an iterator, and then replace that range with entries from
|
||||
an input iterator.
|
||||
|
||||
- The new trait `RawEntryApiV1` offers opt-in access to a raw entry API for
|
||||
`IndexMap`, corresponding to the unstable API on `HashSet` as of Rust 1.75.
|
||||
|
||||
- Many `IndexMap` and `IndexSet` methods have relaxed their type constraints,
|
||||
e.g. removing `K: Hash` on methods that don't actually need to hash.
|
||||
|
||||
- Removal methods `remove`, `remove_entry`, and `take` are now deprecated
|
||||
in favor of their `shift_` or `swap_` prefixed variants, which are more
|
||||
explicit about their effect on the index and order of remaining items.
|
||||
The deprecated methods will remain to guide drop-in replacements from
|
||||
`HashMap` and `HashSet` toward the prefixed methods.
|
||||
|
||||
## 2.1.0
|
||||
|
||||
- Empty slices can now be created with `map::Slice::{new, new_mut}` and
|
||||
`set::Slice::new`. In addition, `Slice::new`, `len`, and `is_empty` are
|
||||
now `const` functions on both types.
|
||||
|
||||
- `IndexMap`, `IndexSet`, and their respective `Slice`s all have binary
|
||||
search methods for sorted data: map `binary_search_keys` and set
|
||||
`binary_search` for plain comparison, `binary_search_by` for custom
|
||||
comparators, `binary_search_by_key` for key extraction, and
|
||||
`partition_point` for boolean conditions.
|
||||
|
||||
## 2.0.2
|
||||
|
||||
- The `hashbrown` dependency has been updated to version 0.14.1 to
|
||||
complete the support for Rust 1.63.
|
||||
|
||||
## 2.0.1
|
||||
|
||||
- **MSRV**: Rust 1.63.0 is now supported as well, pending publication of
|
||||
`hashbrown`'s relaxed MSRV (or use cargo `--ignore-rust-version`).
|
||||
|
||||
## 2.0.0
|
||||
|
||||
- **MSRV**: Rust 1.64.0 or later is now required.
|
||||
|
||||
- The `"std"` feature is no longer auto-detected. It is included in the
|
||||
default feature set, or else can be enabled like any other Cargo feature.
|
||||
|
||||
- The `"serde-1"` feature has been removed, leaving just the optional
|
||||
`"serde"` dependency to be enabled like a feature itself.
|
||||
|
||||
- `IndexMap::get_index_mut` now returns `Option<(&K, &mut V)>`, changing
|
||||
the key part from `&mut K` to `&K`. There is also a new alternative
|
||||
`MutableKeys::get_index_mut2` to access the former behavior.
|
||||
|
||||
- The new `map::Slice<K, V>` and `set::Slice<T>` offer a linear view of maps
|
||||
and sets, behaving a lot like normal `[(K, V)]` and `[T]` slices. Notably,
|
||||
comparison traits like `Eq` only consider items in order, rather than hash
|
||||
lookups, and slices even implement `Hash`.
|
||||
|
||||
- `IndexMap` and `IndexSet` now have `sort_by_cached_key` and
|
||||
`par_sort_by_cached_key` methods which perform stable sorts in place
|
||||
using a key extraction function.
|
||||
|
||||
- `IndexMap` and `IndexSet` now have `reserve_exact`, `try_reserve`, and
|
||||
`try_reserve_exact` methods that correspond to the same methods on `Vec`.
|
||||
However, exactness only applies to the direct capacity for items, while the
|
||||
raw hash table still follows its own rules for capacity and load factor.
|
||||
|
||||
- The `Equivalent` trait is now re-exported from the `equivalent` crate,
|
||||
intended as a common base to allow types to work with multiple map types.
|
||||
|
||||
- The `hashbrown` dependency has been updated to version 0.14.
|
||||
|
||||
- The `serde_seq` module has been moved from the crate root to below the
|
||||
`map` module.
|
||||
|
||||
## 1.9.3
|
||||
|
||||
- Bump the `rustc-rayon` dependency, for compiler use only.
|
||||
|
||||
## 1.9.2
|
||||
|
||||
- `IndexMap` and `IndexSet` both implement `arbitrary::Arbitrary<'_>` and
|
||||
`quickcheck::Arbitrary` if those optional dependency features are enabled.
|
||||
|
||||
## 1.9.1
|
||||
|
||||
- The MSRV now allows Rust 1.56.0 as well. However, currently `hashbrown`
|
||||
0.12.1 requires 1.56.1, so users on 1.56.0 should downgrade that to 0.12.0
|
||||
until there is a later published version relaxing its requirement.
|
||||
|
||||
## 1.9.0
|
||||
|
||||
- **MSRV**: Rust 1.56.1 or later is now required.
|
||||
|
||||
- The `hashbrown` dependency has been updated to version 0.12.
|
||||
|
||||
- `IterMut` and `ValuesMut` now implement `Debug`.
|
||||
|
||||
- The new `IndexMap::shrink_to` and `IndexSet::shrink_to` methods shrink
|
||||
the capacity with a lower bound.
|
||||
|
||||
- The new `IndexMap::move_index` and `IndexSet::move_index` methods change
|
||||
the position of an item from one index to another, shifting the items
|
||||
between to accommodate the move.
|
||||
|
||||
## 1.8.2
|
||||
|
||||
- Bump the `rustc-rayon` dependency, for compiler use only.
|
||||
|
||||
## 1.8.1
|
||||
|
||||
- The new `IndexSet::replace_full` will return the index of the item along
|
||||
with the replaced value, if any, by @zakcutner in PR [222].
|
||||
|
||||
[222]: https://github.com/indexmap-rs/indexmap/pull/222
|
||||
|
||||
## 1.8.0
|
||||
|
||||
- The new `IndexMap::into_keys` and `IndexMap::into_values` will consume
|
||||
the map into keys or values, respectively, matching Rust 1.54's `HashMap`
|
||||
methods, by @taiki-e in PR [195].
|
||||
|
||||
- More of the iterator types implement `Debug`, `ExactSizeIterator`, and
|
||||
`FusedIterator`, by @cuviper in PR [196].
|
||||
|
||||
- `IndexMap` and `IndexSet` now implement rayon's `ParallelDrainRange`,
|
||||
by @cuviper in PR [197].
|
||||
|
||||
- `IndexMap::with_hasher` and `IndexSet::with_hasher` are now `const`
|
||||
functions, allowing static maps and sets, by @mwillsey in PR [203].
|
||||
|
||||
- `IndexMap` and `IndexSet` now implement `From` for arrays, matching
|
||||
Rust 1.56's implementation for `HashMap`, by @rouge8 in PR [205].
|
||||
|
||||
- `IndexMap` and `IndexSet` now have methods `sort_unstable_keys`,
|
||||
`sort_unstable_by`, `sorted_unstable_by`, and `par_*` equivalents,
|
||||
which sort in-place without preserving the order of equal items, by
|
||||
@bhgomes in PR [211].
|
||||
|
||||
[195]: https://github.com/indexmap-rs/indexmap/pull/195
|
||||
[196]: https://github.com/indexmap-rs/indexmap/pull/196
|
||||
[197]: https://github.com/indexmap-rs/indexmap/pull/197
|
||||
[203]: https://github.com/indexmap-rs/indexmap/pull/203
|
||||
[205]: https://github.com/indexmap-rs/indexmap/pull/205
|
||||
[211]: https://github.com/indexmap-rs/indexmap/pull/211
|
||||
|
||||
## 1.7.0
|
||||
|
||||
- **MSRV**: Rust 1.49 or later is now required.
|
||||
|
||||
- The `hashbrown` dependency has been updated to version 0.11.
|
||||
|
||||
## 1.6.2
|
||||
|
||||
- Fixed to match `std` behavior, `OccupiedEntry::key` now references the
|
||||
existing key in the map instead of the lookup key, by @cuviper in PR [170].
|
||||
|
||||
- The new `Entry::or_insert_with_key` matches Rust 1.50's `Entry` method,
|
||||
passing `&K` to the callback to create a value, by @cuviper in PR [175].
|
||||
|
||||
[170]: https://github.com/indexmap-rs/indexmap/pull/170
|
||||
[175]: https://github.com/indexmap-rs/indexmap/pull/175
|
||||
|
||||
## 1.6.1
|
||||
|
||||
- The new `serde_seq` module implements `IndexMap` serialization as a
|
||||
sequence to ensure order is preserved, by @cuviper in PR [158].
|
||||
|
||||
- New methods on maps and sets work like the `Vec`/slice methods by the same name:
|
||||
`truncate`, `split_off`, `first`, `first_mut`, `last`, `last_mut`, and
|
||||
`swap_indices`, by @cuviper in PR [160].
|
||||
|
||||
[158]: https://github.com/indexmap-rs/indexmap/pull/158
|
||||
[160]: https://github.com/indexmap-rs/indexmap/pull/160
|
||||
|
||||
## 1.6.0
|
||||
|
||||
- **MSRV**: Rust 1.36 or later is now required.
|
||||
|
||||
- The `hashbrown` dependency has been updated to version 0.9.
|
||||
|
||||
## 1.5.2
|
||||
|
||||
- The new "std" feature will force the use of `std` for users that explicitly
|
||||
want the default `S = RandomState`, bypassing the autodetection added in 1.3.0,
|
||||
by @cuviper in PR [145].
|
||||
|
||||
[145]: https://github.com/indexmap-rs/indexmap/pull/145
|
||||
|
||||
## 1.5.1
|
||||
|
||||
- Values can now be indexed by their `usize` position by @cuviper in PR [132].
|
||||
|
||||
- Some of the generic bounds have been relaxed to match `std` by @cuviper in PR [141].
|
||||
|
||||
- `drain` now accepts any `R: RangeBounds<usize>` by @cuviper in PR [142].
|
||||
|
||||
[132]: https://github.com/indexmap-rs/indexmap/pull/132
|
||||
[141]: https://github.com/indexmap-rs/indexmap/pull/141
|
||||
[142]: https://github.com/indexmap-rs/indexmap/pull/142
|
||||
|
||||
## 1.5.0
|
||||
|
||||
- **MSRV**: Rust 1.32 or later is now required.
|
||||
|
||||
- The inner hash table is now based on `hashbrown` by @cuviper in PR [131].
|
||||
This also completes the method `reserve` and adds `shrink_to_fit`.
|
||||
|
||||
- Add new methods `get_key_value`, `remove_entry`, `swap_remove_entry`,
|
||||
and `shift_remove_entry`, by @cuviper in PR [136]
|
||||
|
||||
- `Clone::clone_from` reuses allocations by @cuviper in PR [125]
|
||||
|
||||
- Add new method `reverse` by @linclelinkpart5 in PR [128]
|
||||
|
||||
[125]: https://github.com/indexmap-rs/indexmap/pull/125
|
||||
[128]: https://github.com/indexmap-rs/indexmap/pull/128
|
||||
[131]: https://github.com/indexmap-rs/indexmap/pull/131
|
||||
[136]: https://github.com/indexmap-rs/indexmap/pull/136
|
||||
|
||||
## 1.4.0
|
||||
|
||||
- Add new method `get_index_of` by @Thermatrix in PR [115] and [120]
|
||||
|
||||
- Fix build script rebuild-if-changed configuration to use "build.rs";
|
||||
fixes issue [123]. Fix by @cuviper.
|
||||
|
||||
- Dev-dependencies (rand and quickcheck) have been updated. The crate's tests
|
||||
now run using Rust 1.32 or later (MSRV for building the crate has not changed).
|
||||
by @kjeremy and @bluss
|
||||
|
||||
[123]: https://github.com/indexmap-rs/indexmap/issues/123
|
||||
[115]: https://github.com/indexmap-rs/indexmap/pull/115
|
||||
[120]: https://github.com/indexmap-rs/indexmap/pull/120
|
||||
|
||||
## 1.3.2
|
||||
|
||||
- Maintenance update to regenerate the published `Cargo.toml`.
|
||||
|
||||
## 1.3.1
|
||||
|
||||
- Maintenance update for formatting and `autocfg` 1.0.
|
||||
|
||||
## 1.3.0
|
||||
|
||||
- The deprecation messages in the previous version have been removed.
|
||||
(The methods have not otherwise changed.) Docs for removal methods have been
|
||||
improved.
|
||||
- From Rust 1.36, this crate supports being built **without std**, requiring
|
||||
`alloc` instead. This is enabled automatically when it is detected that
|
||||
`std` is not available. There is no crate feature to enable/disable to
|
||||
trigger this. The new build-dep `autocfg` enables this.
|
||||
|
||||
## 1.2.0
|
||||
|
||||
- Plain `.remove()` now has a deprecation message, it informs the user
|
||||
about picking one of the removal functions `swap_remove` and `shift_remove`
|
||||
which have different performance and order semantics.
|
||||
Plain `.remove()` will not be removed; the warning message and method
will remain until further notice.
|
||||
|
||||
- Add new method `shift_remove` for order preserving removal on the map,
|
||||
and `shift_take` for the corresponding operation on the set.
|
||||
|
||||
- Add methods `swap_remove`, `swap_remove_entry` to `Entry`.
|
||||
|
||||
- Fix indexset/indexmap to support full paths, like `indexmap::indexmap!()`
|
||||
|
||||
- Internal improvements: fix warnings, deprecations and style lints
|
||||
|
||||
## 1.1.0
|
||||
|
||||
- Added optional feature `"rayon"` that adds parallel iterator support
|
||||
to `IndexMap` and `IndexSet` using Rayon. This includes all the regular
|
||||
iterators in parallel versions, and parallel sort.
|
||||
|
||||
- Implemented `Clone` for `map::{Iter, Keys, Values}` and
|
||||
`set::{Difference, Intersection, Iter, SymmetricDifference, Union}`
|
||||
|
||||
- Implemented `Debug` for `map::{Entry, IntoIter, Iter, Keys, Values}` and
|
||||
`set::{Difference, Intersection, IntoIter, Iter, SymmetricDifference, Union}`
|
||||
|
||||
- Serde trait `IntoDeserializer` are implemented for `IndexMap` and `IndexSet`.
|
||||
|
||||
- Minimum Rust version requirement increased to Rust 1.30 for development builds.
|
||||
|
||||
## 1.0.2
|
||||
|
||||
- The new methods `IndexMap::insert_full` and `IndexSet::insert_full` are
|
||||
both like `insert` with the index included in the return value.
|
||||
|
||||
- The new method `Entry::and_modify` can be used to modify occupied
|
||||
entries, matching the new methods of `std` maps in Rust 1.26.
|
||||
|
||||
- The new method `Entry::or_default` inserts a default value in unoccupied
|
||||
entries, matching the new methods of `std` maps in Rust 1.28.
|
||||
|
||||
## 1.0.1
|
||||
|
||||
- Document Rust version policy for the crate (see rustdoc)
|
||||
|
||||
## 1.0.0
|
||||
|
||||
- This is the 1.0 release for `indexmap`! (the crate and datastructure
|
||||
formerly known as “ordermap”)
|
||||
- `OccupiedEntry::insert` changed its signature, to use `&mut self` for
|
||||
the method receiver, matching the equivalent method for a standard
|
||||
`HashMap`. Thanks to @dtolnay for finding this bug.
|
||||
- The deprecated old names from ordermap were removed: `OrderMap`,
|
||||
`OrderSet`, `ordermap!{}`, `orderset!{}`. Use the new `IndexMap`
|
||||
etc names instead.
|
||||
|
||||
## 0.4.1
|
||||
|
||||
- Renamed crate to `indexmap`; the `ordermap` crate is now deprecated
|
||||
and the types `OrderMap/Set` now have a deprecation notice.
|
||||
|
||||
## 0.4.0
|
||||
|
||||
- This is the last release series for this `ordermap` under that name,
|
||||
because the crate is **going to be renamed** to `indexmap` (with types
|
||||
`IndexMap`, `IndexSet`) and no change in functionality!
|
||||
- The map and its associated structs moved into the `map` submodule of the
|
||||
crate, so that the map and set are symmetric
|
||||
|
||||
+ The iterators, `Entry` and other structs are now under `ordermap::map::`
|
||||
|
||||
- Internally refactored `OrderMap<K, V, S>` so that all the main algorithms
|
||||
(insertion, lookup, removal etc) that don't use the `S` parameter (the
|
||||
hasher) are compiled without depending on `S`, which reduces generics bloat.
|
||||
|
||||
- `Entry<K, V>` no longer has a type parameter `S`, which is just like
|
||||
the standard `HashMap`'s entry.
|
||||
|
||||
- Minimum Rust version requirement increased to Rust 1.18
|
||||
|
||||
## 0.3.5
|
||||
|
||||
- Documentation improvements
|
||||
|
||||
## 0.3.4
|
||||
|
||||
- The `.retain()` methods for `OrderMap` and `OrderSet` now
|
||||
traverse the elements in order, and the retained elements **keep their order**
|
||||
- Added new methods `.sort_by()`, `.sort_keys()` to `OrderMap` and
|
||||
`.sort_by()`, `.sort()` to `OrderSet`. These methods allow you to
|
||||
sort the maps in place efficiently.
|
||||
|
||||
## 0.3.3
|
||||
|
||||
- Document insertion behaviour better by @lucab
|
||||
- Updated dependencies (no feature changes) by @ignatenkobrain
|
||||
|
||||
## 0.3.2
|
||||
|
||||
- Add `OrderSet` by @cuviper!
|
||||
- `OrderMap::drain` is now (too) a double ended iterator.
|
||||
|
||||
## 0.3.1
|
||||
|
||||
- In all ordermap iterators, forward the `collect` method to the underlying
|
||||
iterator as well.
|
||||
- Add crates.io categories.
|
||||
|
||||
## 0.3.0
|
||||
|
||||
- The methods `get_pair`, `get_pair_index` were both replaced by
|
||||
`get_full` (and the same for the mutable case).
|
||||
- Method `swap_remove_pair` replaced by `swap_remove_full`.
|
||||
- Add trait `MutableKeys` for opt-in mutable key access. Mutable key access
|
||||
is only possible through the methods of this extension trait.
|
||||
- Add new trait `Equivalent` for key equivalence. This extends the
|
||||
`Borrow` trait mechanism for `OrderMap::get` in a backwards compatible
|
||||
way, just some minor type inference related issues may become apparent.
|
||||
See [#10] for more information.
|
||||
- Implement `Extend<(&K, &V)>` by @xfix.
|
||||
|
||||
[#10]: https://github.com/indexmap-rs/indexmap/pull/10
|
||||
|
||||
## 0.2.13
|
||||
|
||||
- Fix deserialization to support custom hashers by @Techcable.
|
||||
- Add methods `.index()` on the entry types by @garro95.
|
||||
|
||||
## 0.2.12
|
||||
|
||||
- Add methods `.with_hasher()`, `.hasher()`.
|
||||
|
||||
## 0.2.11
|
||||
|
||||
- Support `ExactSizeIterator` for the iterators. By @Binero.
|
||||
- Use `Box<[Pos]>` internally, saving a word in the `OrderMap` struct.
|
||||
- Serde support, with crate feature `"serde-1"`. By @xfix.
|
||||
|
||||
## 0.2.10
|
||||
|
||||
- Add iterator `.drain(..)` by @stevej.
|
||||
|
||||
## 0.2.9
|
||||
|
||||
- Add method `.is_empty()` by @overvenus.
|
||||
- Implement `PartialEq, Eq` by @overvenus.
|
||||
- Add method `.sorted_by()`.
|
||||
|
||||
## 0.2.8
|
||||
|
||||
- Add iterators `.values()` and `.values_mut()`.
|
||||
- Fix compatibility with 32-bit platforms.
|
||||
|
||||
## 0.2.7
|
||||
|
||||
- Add `.retain()`.
|
||||
|
||||
## 0.2.6
|
||||
|
||||
- Add `OccupiedEntry::remove_entry` and other minor entry methods,
|
||||
so that it now has all the features of `HashMap`'s entries.
|
||||
|
||||
## 0.2.5
|
||||
|
||||
- Improved `.pop()` slightly.
|
||||
|
||||
## 0.2.4
|
||||
|
||||
- Improved performance of `.insert()` ([#3]) by @pczarn.
|
||||
|
||||
[#3]: https://github.com/indexmap-rs/indexmap/pull/3
|
||||
|
||||
## 0.2.3
|
||||
|
||||
- Generalize `Entry` for now, so that it works on hashmaps with non-default
|
||||
hasher. However, there's a lingering compat issue since libstd `HashMap`
|
||||
does not parameterize its entries by the hasher (`S` typarm).
|
||||
- Special case some iterator methods like `.nth()`.
|
||||
|
||||
## 0.2.2
|
||||
|
||||
- Disable the verbose `Debug` impl by default.
|
||||
|
||||
## 0.2.1
|
||||
|
||||
- Fix doc links and clarify docs.
|
||||
|
||||
## 0.2.0
|
||||
|
||||
- Add more `HashMap` methods & compat with its API.
|
||||
- Experimental support for `.entry()` (the simplest parts of the API).
|
||||
- Add `.reserve()` (placeholder impl).
|
||||
- Add `.remove()` as synonym for `.swap_remove()`.
|
||||
- Changed `.insert()` to swap value if the entry already exists, and
|
||||
return `Option`.
|
||||
- Experimental support as an *indexed* hash map! Added methods
|
||||
`.get_index()`, `.get_index_mut()`, `.swap_remove_index()`,
|
||||
`.get_pair_index()`, `.get_pair_index_mut()`.
|
||||
|
||||
## 0.1.2
|
||||
|
||||
- Implement the 32/32 split idea for `Pos` which improves cache utilization
|
||||
and lookup performance.
|
||||
|
||||
## 0.1.1
|
||||
|
||||
- Initial release.
|
||||
763
third-party/vendor/indexmap/benches/bench.rs
vendored
Normal file
763
third-party/vendor/indexmap/benches/bench.rs
vendored
Normal file
|
|
@ -0,0 +1,763 @@
|
|||
#![feature(test)]
|
||||
|
||||
extern crate test;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use fnv::FnvHasher;
|
||||
use std::hash::BuildHasherDefault;
|
||||
use std::hash::Hash;
|
||||
type FnvBuilder = BuildHasherDefault<FnvHasher>;
|
||||
|
||||
use test::black_box;
|
||||
use test::Bencher;
|
||||
|
||||
use indexmap::IndexMap;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use rand::rngs::SmallRng;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::SeedableRng;
|
||||
|
||||
/// Use a consistently seeded Rng for benchmark stability
|
||||
fn small_rng() -> SmallRng {
|
||||
let seed = u64::from_le_bytes(*b"indexmap");
|
||||
SmallRng::seed_from_u64(seed)
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn new_hashmap(b: &mut Bencher) {
|
||||
b.iter(|| HashMap::<String, String>::new());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn new_indexmap(b: &mut Bencher) {
|
||||
b.iter(|| IndexMap::<String, String>::new());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn with_capacity_10e5_hashmap(b: &mut Bencher) {
|
||||
b.iter(|| HashMap::<String, String>::with_capacity(10_000));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn with_capacity_10e5_indexmap(b: &mut Bencher) {
|
||||
b.iter(|| IndexMap::<String, String>::with_capacity(10_000));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_hashmap_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x, ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_indexmap_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
b.iter(|| {
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x, ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_hashmap_string_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x.to_string(), ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_indexmap_string_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
b.iter(|| {
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x.to_string(), ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_hashmap_str_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let ss = Vec::from_iter((0..c).map(|x| x.to_string()));
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for key in &ss {
|
||||
map.insert(&key[..], ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_indexmap_str_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let ss = Vec::from_iter((0..c).map(|x| x.to_string()));
|
||||
b.iter(|| {
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
for key in &ss {
|
||||
map.insert(&key[..], ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let value = [0u64; 10];
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for i in 0..c {
|
||||
map.insert(i, value);
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_indexmap_int_bigvalue_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let value = [0u64; 10];
|
||||
b.iter(|| {
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
for i in 0..c {
|
||||
map.insert(i, value);
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_hashmap_100_000(b: &mut Bencher) {
|
||||
let c = 100_000;
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x, ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_indexmap_100_000(b: &mut Bencher) {
|
||||
let c = 100_000;
|
||||
b.iter(|| {
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x, ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_hashmap_150(b: &mut Bencher) {
|
||||
let c = 150;
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x, ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_indexmap_150(b: &mut Bencher) {
|
||||
let c = 150;
|
||||
b.iter(|| {
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x, ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn entry_hashmap_150(b: &mut Bencher) {
|
||||
let c = 150;
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.entry(x).or_insert(());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn entry_indexmap_150(b: &mut Bencher) {
|
||||
let c = 150;
|
||||
b.iter(|| {
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.entry(x).or_insert(());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn iter_sum_hashmap_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
let len = c - c / 10;
|
||||
for x in 0..len {
|
||||
map.insert(x, ());
|
||||
}
|
||||
assert_eq!(map.len(), len);
|
||||
b.iter(|| map.keys().sum::<usize>());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn iter_sum_indexmap_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
let len = c - c / 10;
|
||||
for x in 0..len {
|
||||
map.insert(x, ());
|
||||
}
|
||||
assert_eq!(map.len(), len);
|
||||
b.iter(|| map.keys().sum::<usize>());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn iter_black_box_hashmap_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
let len = c - c / 10;
|
||||
for x in 0..len {
|
||||
map.insert(x, ());
|
||||
}
|
||||
assert_eq!(map.len(), len);
|
||||
b.iter(|| {
|
||||
for &key in map.keys() {
|
||||
black_box(key);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn iter_black_box_indexmap_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
let len = c - c / 10;
|
||||
for x in 0..len {
|
||||
map.insert(x, ());
|
||||
}
|
||||
assert_eq!(map.len(), len);
|
||||
b.iter(|| {
|
||||
for &key in map.keys() {
|
||||
black_box(key);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn shuffled_keys<I>(iter: I) -> Vec<I::Item>
|
||||
where
|
||||
I: IntoIterator,
|
||||
{
|
||||
let mut v = Vec::from_iter(iter);
|
||||
let mut rng = small_rng();
|
||||
v.shuffle(&mut rng);
|
||||
v
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_hashmap_10_000_exist(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
let keys = shuffled_keys(0..c);
|
||||
for &key in &keys {
|
||||
map.insert(key, 1);
|
||||
}
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in 5000..c {
|
||||
found += map.get(&key).is_some() as i32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_hashmap_10_000_noexist(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
let keys = shuffled_keys(0..c);
|
||||
for &key in &keys {
|
||||
map.insert(key, 1);
|
||||
}
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in c..15000 {
|
||||
found += map.get(&key).is_some() as i32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_indexmap_10_000_exist(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
let keys = shuffled_keys(0..c);
|
||||
for &key in &keys {
|
||||
map.insert(key, 1);
|
||||
}
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in 5000..c {
|
||||
found += map.get(&key).is_some() as i32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_indexmap_10_000_noexist(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
let keys = shuffled_keys(0..c);
|
||||
for &key in &keys {
|
||||
map.insert(key, 1);
|
||||
}
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in c..15000 {
|
||||
found += map.get(&key).is_some() as i32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
// number of items to look up
|
||||
const LOOKUP_MAP_SIZE: u32 = 100_000_u32;
|
||||
const LOOKUP_SAMPLE_SIZE: u32 = 5000;
|
||||
const SORT_MAP_SIZE: usize = 10_000;
|
||||
|
||||
// use lazy_static so that comparison benchmarks use the exact same inputs
|
||||
lazy_static! {
|
||||
static ref KEYS: Vec<u32> = shuffled_keys(0..LOOKUP_MAP_SIZE);
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref HMAP_100K: HashMap<u32, u32> = {
|
||||
let c = LOOKUP_MAP_SIZE;
|
||||
let mut map = HashMap::with_capacity(c as usize);
|
||||
let keys = &*KEYS;
|
||||
for &key in keys {
|
||||
map.insert(key, key);
|
||||
}
|
||||
map
|
||||
};
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref IMAP_100K: IndexMap<u32, u32> = {
|
||||
let c = LOOKUP_MAP_SIZE;
|
||||
let mut map = IndexMap::with_capacity(c as usize);
|
||||
let keys = &*KEYS;
|
||||
for &key in keys {
|
||||
map.insert(key, key);
|
||||
}
|
||||
map
|
||||
};
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref IMAP_SORT_U32: IndexMap<u32, u32> = {
|
||||
let mut map = IndexMap::with_capacity(SORT_MAP_SIZE);
|
||||
for &key in &KEYS[..SORT_MAP_SIZE] {
|
||||
map.insert(key, key);
|
||||
}
|
||||
map
|
||||
};
|
||||
}
|
||||
lazy_static! {
|
||||
static ref IMAP_SORT_S: IndexMap<String, String> = {
|
||||
let mut map = IndexMap::with_capacity(SORT_MAP_SIZE);
|
||||
for &key in &KEYS[..SORT_MAP_SIZE] {
|
||||
map.insert(format!("{:^16x}", &key), String::new());
|
||||
}
|
||||
map
|
||||
};
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_hashmap_100_000_multi(b: &mut Bencher) {
|
||||
let map = &*HMAP_100K;
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in 0..LOOKUP_SAMPLE_SIZE {
|
||||
found += map.get(&key).is_some() as u32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_indexmap_100_000_multi(b: &mut Bencher) {
|
||||
let map = &*IMAP_100K;
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in 0..LOOKUP_SAMPLE_SIZE {
|
||||
found += map.get(&key).is_some() as u32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
// inorder: Test looking up keys in the same order as they were inserted
|
||||
#[bench]
|
||||
fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) {
|
||||
let map = &*HMAP_100K;
|
||||
let keys = &*KEYS;
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] {
|
||||
found += map.get(key).is_some() as u32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_indexmap_100_000_inorder_multi(b: &mut Bencher) {
|
||||
let map = &*IMAP_100K;
|
||||
let keys = &*KEYS;
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] {
|
||||
found += map.get(key).is_some() as u32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_hashmap_100_000_single(b: &mut Bencher) {
|
||||
let map = &*HMAP_100K;
|
||||
let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle();
|
||||
b.iter(|| {
|
||||
let key = iter.next().unwrap();
|
||||
map.get(&key).is_some()
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_indexmap_100_000_single(b: &mut Bencher) {
|
||||
let map = &*IMAP_100K;
|
||||
let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle();
|
||||
b.iter(|| {
|
||||
let key = iter.next().unwrap();
|
||||
map.get(&key).is_some()
|
||||
});
|
||||
}
|
||||
|
||||
const GROW_SIZE: usize = 100_000;
|
||||
type GrowKey = u32;
|
||||
|
||||
// Test grow/resize without preallocation
|
||||
#[bench]
|
||||
fn grow_fnv_hashmap_100_000(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let mut map: HashMap<_, _, FnvBuilder> = HashMap::default();
|
||||
for x in 0..GROW_SIZE {
|
||||
map.insert(x as GrowKey, x as GrowKey);
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn grow_fnv_indexmap_100_000(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default();
|
||||
for x in 0..GROW_SIZE {
|
||||
map.insert(x as GrowKey, x as GrowKey);
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
const MERGE: u64 = 10_000;
|
||||
#[bench]
|
||||
fn hashmap_merge_simple(b: &mut Bencher) {
|
||||
let first_map: HashMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
|
||||
let second_map: HashMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
|
||||
b.iter(|| {
|
||||
let mut merged = first_map.clone();
|
||||
merged.extend(second_map.iter().map(|(&k, &v)| (k, v)));
|
||||
merged
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn hashmap_merge_shuffle(b: &mut Bencher) {
|
||||
let first_map: HashMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
|
||||
let second_map: HashMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
|
||||
let mut v = Vec::new();
|
||||
let mut rng = small_rng();
|
||||
b.iter(|| {
|
||||
let mut merged = first_map.clone();
|
||||
v.extend(second_map.iter().map(|(&k, &v)| (k, v)));
|
||||
v.shuffle(&mut rng);
|
||||
merged.extend(v.drain(..));
|
||||
|
||||
merged
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn indexmap_merge_simple(b: &mut Bencher) {
|
||||
let first_map: IndexMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
|
||||
let second_map: IndexMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
|
||||
b.iter(|| {
|
||||
let mut merged = first_map.clone();
|
||||
merged.extend(second_map.iter().map(|(&k, &v)| (k, v)));
|
||||
merged
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn indexmap_merge_shuffle(b: &mut Bencher) {
|
||||
let first_map: IndexMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
|
||||
let second_map: IndexMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
|
||||
let mut v = Vec::new();
|
||||
let mut rng = small_rng();
|
||||
b.iter(|| {
|
||||
let mut merged = first_map.clone();
|
||||
v.extend(second_map.iter().map(|(&k, &v)| (k, v)));
|
||||
v.shuffle(&mut rng);
|
||||
merged.extend(v.drain(..));
|
||||
|
||||
merged
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn swap_remove_indexmap_100_000(b: &mut Bencher) {
|
||||
let map = IMAP_100K.clone();
|
||||
let mut keys = Vec::from_iter(map.keys().copied());
|
||||
let mut rng = small_rng();
|
||||
keys.shuffle(&mut rng);
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
for key in &keys {
|
||||
map.swap_remove(key);
|
||||
}
|
||||
assert_eq!(map.len(), 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn shift_remove_indexmap_100_000_few(b: &mut Bencher) {
|
||||
let map = IMAP_100K.clone();
|
||||
let mut keys = Vec::from_iter(map.keys().copied());
|
||||
let mut rng = small_rng();
|
||||
keys.shuffle(&mut rng);
|
||||
keys.truncate(50);
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
for key in &keys {
|
||||
map.shift_remove(key);
|
||||
}
|
||||
assert_eq!(map.len(), IMAP_100K.len() - keys.len());
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn shift_remove_indexmap_2_000_full(b: &mut Bencher) {
|
||||
let mut keys = KEYS[..2_000].to_vec();
|
||||
let mut map = IndexMap::with_capacity(keys.len());
|
||||
for &key in &keys {
|
||||
map.insert(key, key);
|
||||
}
|
||||
let mut rng = small_rng();
|
||||
keys.shuffle(&mut rng);
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
for key in &keys {
|
||||
map.shift_remove(key);
|
||||
}
|
||||
assert_eq!(map.len(), 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn pop_indexmap_100_000(b: &mut Bencher) {
|
||||
let map = IMAP_100K.clone();
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
while !map.is_empty() {
|
||||
map.pop();
|
||||
}
|
||||
assert_eq!(map.len(), 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn few_retain_indexmap_100_000(b: &mut Bencher) {
|
||||
let map = IMAP_100K.clone();
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
map.retain(|k, _| *k % 7 == 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn few_retain_hashmap_100_000(b: &mut Bencher) {
|
||||
let map = HMAP_100K.clone();
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
map.retain(|k, _| *k % 7 == 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn half_retain_indexmap_100_000(b: &mut Bencher) {
|
||||
let map = IMAP_100K.clone();
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
map.retain(|k, _| *k % 2 == 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn half_retain_hashmap_100_000(b: &mut Bencher) {
|
||||
let map = HMAP_100K.clone();
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
map.retain(|k, _| *k % 2 == 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn many_retain_indexmap_100_000(b: &mut Bencher) {
|
||||
let map = IMAP_100K.clone();
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
map.retain(|k, _| *k % 100 != 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn many_retain_hashmap_100_000(b: &mut Bencher) {
|
||||
let map = HMAP_100K.clone();
|
||||
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
map.retain(|k, _| *k % 100 != 0);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
// simple sort impl for comparison
|
||||
pub fn simple_sort<K: Ord + Hash, V>(m: &mut IndexMap<K, V>) {
|
||||
let mut ordered: Vec<_> = m.drain(..).collect();
|
||||
ordered.sort_by(|left, right| left.0.cmp(&right.0));
|
||||
m.extend(ordered);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn indexmap_sort_s(b: &mut Bencher) {
|
||||
let map = IMAP_SORT_S.clone();
|
||||
|
||||
// there's a map clone there, but it's still useful to profile this
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
map.sort_keys();
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn indexmap_simple_sort_s(b: &mut Bencher) {
|
||||
let map = IMAP_SORT_S.clone();
|
||||
|
||||
// there's a map clone there, but it's still useful to profile this
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
simple_sort(&mut map);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn indexmap_sort_u32(b: &mut Bencher) {
|
||||
let map = IMAP_SORT_U32.clone();
|
||||
|
||||
// there's a map clone there, but it's still useful to profile this
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
map.sort_keys();
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn indexmap_simple_sort_u32(b: &mut Bencher) {
|
||||
let map = IMAP_SORT_U32.clone();
|
||||
|
||||
// there's a map clone there, but it's still useful to profile this
|
||||
b.iter(|| {
|
||||
let mut map = map.clone();
|
||||
simple_sort(&mut map);
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
// measure the fixed overhead of cloning in sort benchmarks
|
||||
#[bench]
|
||||
fn indexmap_clone_for_sort_s(b: &mut Bencher) {
|
||||
let map = IMAP_SORT_S.clone();
|
||||
|
||||
b.iter(|| map.clone());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn indexmap_clone_for_sort_u32(b: &mut Bencher) {
|
||||
let map = IMAP_SORT_U32.clone();
|
||||
|
||||
b.iter(|| map.clone());
|
||||
}
|
||||
185
third-party/vendor/indexmap/benches/faststring.rs
vendored
Normal file
185
third-party/vendor/indexmap/benches/faststring.rs
vendored
Normal file
|
|
@ -0,0 +1,185 @@
|
|||
#![feature(test)]
|
||||
|
||||
extern crate test;
|
||||
|
||||
use test::Bencher;
|
||||
|
||||
use indexmap::IndexMap;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use rand::rngs::SmallRng;
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::SeedableRng;
|
||||
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
use std::borrow::Borrow;
|
||||
use std::ops::Deref;
|
||||
|
||||
/// Use a consistently seeded Rng for benchmark stability
|
||||
fn small_rng() -> SmallRng {
|
||||
let seed = u64::from_le_bytes(*b"indexmap");
|
||||
SmallRng::seed_from_u64(seed)
|
||||
}
|
||||
|
||||
/// Transparent wrapper whose `Hash` impls feed the string's raw bytes to the
/// hasher in a single `write` call ("one-shot" hashing), for benchmark
/// comparison against `str`'s default `Hash`.
#[derive(PartialEq, Eq, Copy, Clone)]
#[repr(transparent)]
pub struct OneShot<T: ?Sized>(pub T);

impl Hash for OneShot<str> {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        hasher.write(self.0.as_bytes())
    }
}

impl<'a, S> From<&'a S> for &'a OneShot<str>
where
    S: AsRef<str>,
{
    fn from(source: &'a S) -> Self {
        let raw: &str = source.as_ref();
        // SAFETY: `OneShot` is `#[repr(transparent)]` over its single field,
        // so `&str` and `&OneShot<str>` share the same layout.
        unsafe { &*(raw as *const str as *const OneShot<str>) }
    }
}

impl Hash for OneShot<String> {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        // Must match `OneShot<str>`'s hashing (raw bytes only) so that
        // `Borrow`-based lookups agree, as required by the `Borrow` contract.
        hasher.write(self.0.as_bytes())
    }
}

impl Borrow<OneShot<str>> for OneShot<String> {
    fn borrow(&self) -> &OneShot<str> {
        <&OneShot<str>>::from(&self.0)
    }
}

impl<T> Deref for OneShot<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}
|
||||
|
||||
fn shuffled_keys<I>(iter: I) -> Vec<I::Item>
|
||||
where
|
||||
I: IntoIterator,
|
||||
{
|
||||
let mut v = Vec::from_iter(iter);
|
||||
let mut rng = small_rng();
|
||||
v.shuffle(&mut rng);
|
||||
v
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_hashmap_string_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x.to_string(), ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
b.iter(|| {
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(OneShot(x.to_string()), ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn insert_indexmap_string_10_000(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
b.iter(|| {
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
for x in 0..c {
|
||||
map.insert(x.to_string(), ());
|
||||
}
|
||||
map
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
let keys = shuffled_keys(0..c);
|
||||
for &key in &keys {
|
||||
map.insert(key.to_string(), 1);
|
||||
}
|
||||
let lookups = (5000..c).map(|x| x.to_string()).collect::<Vec<_>>();
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in &lookups {
|
||||
found += map.get(key).is_some() as i32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = HashMap::with_capacity(c);
|
||||
let keys = shuffled_keys(0..c);
|
||||
for &key in &keys {
|
||||
map.insert(OneShot(key.to_string()), 1);
|
||||
}
|
||||
let lookups = (5000..c)
|
||||
.map(|x| OneShot(x.to_string()))
|
||||
.collect::<Vec<_>>();
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in &lookups {
|
||||
found += map.get(key).is_some() as i32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_indexmap_10_000_exist_string(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
let keys = shuffled_keys(0..c);
|
||||
for &key in &keys {
|
||||
map.insert(key.to_string(), 1);
|
||||
}
|
||||
let lookups = (5000..c).map(|x| x.to_string()).collect::<Vec<_>>();
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in &lookups {
|
||||
found += map.get(key).is_some() as i32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn lookup_indexmap_10_000_exist_string_oneshot(b: &mut Bencher) {
|
||||
let c = 10_000;
|
||||
let mut map = IndexMap::with_capacity(c);
|
||||
let keys = shuffled_keys(0..c);
|
||||
for &key in &keys {
|
||||
map.insert(OneShot(key.to_string()), 1);
|
||||
}
|
||||
let lookups = (5000..c)
|
||||
.map(|x| OneShot(x.to_string()))
|
||||
.collect::<Vec<_>>();
|
||||
b.iter(|| {
|
||||
let mut found = 0;
|
||||
for key in &lookups {
|
||||
found += map.get(key).is_some() as i32;
|
||||
}
|
||||
found
|
||||
});
|
||||
}
|
||||
77
third-party/vendor/indexmap/src/arbitrary.rs
vendored
Normal file
77
third-party/vendor/indexmap/src/arbitrary.rs
vendored
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
/// Fuzzing support: implementations of `arbitrary::Arbitrary` for the map
/// and set, gated behind the `arbitrary` feature.
#[cfg(feature = "arbitrary")]
#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))]
mod impl_arbitrary {
    use crate::{IndexMap, IndexSet};
    use arbitrary::{Arbitrary, Result, Unstructured};
    use core::hash::{BuildHasher, Hash};

    impl<'a, K, V, S> Arbitrary<'a> for IndexMap<K, V, S>
    where
        K: Arbitrary<'a> + Hash + Eq,
        V: Arbitrary<'a>,
        S: BuildHasher + Default,
    {
        fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
            // `arbitrary_iter` draws a fuzzer-chosen number of (K, V) pairs.
            u.arbitrary_iter()?.collect()
        }

        fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
            // Variant that consumes all remaining unstructured input.
            u.arbitrary_take_rest_iter()?.collect()
        }
    }

    impl<'a, T, S> Arbitrary<'a> for IndexSet<T, S>
    where
        T: Arbitrary<'a> + Hash + Eq,
        S: BuildHasher + Default,
    {
        fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
            u.arbitrary_iter()?.collect()
        }

        fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
            u.arbitrary_take_rest_iter()?.collect()
        }
    }
}
|
||||
|
||||
/// Property-testing support: implementations of `quickcheck::Arbitrary`
/// for the map and set, gated behind the `quickcheck` feature.
#[cfg(feature = "quickcheck")]
#[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))]
mod impl_quickcheck {
    use crate::{IndexMap, IndexSet};
    use alloc::boxed::Box;
    use alloc::vec::Vec;
    use core::hash::{BuildHasher, Hash};
    use quickcheck::{Arbitrary, Gen};

    impl<K, V, S> Arbitrary for IndexMap<K, V, S>
    where
        K: Arbitrary + Hash + Eq,
        V: Arbitrary,
        S: BuildHasher + Default + Clone + 'static,
    {
        fn arbitrary(g: &mut Gen) -> Self {
            // Generate an arbitrary Vec of pairs, then collect into the map.
            Self::from_iter(Vec::arbitrary(g))
        }

        fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
            // Shrink via Vec's shrinker, rebuilding a map from each candidate.
            let vec = Vec::from_iter(self.clone());
            Box::new(vec.shrink().map(Self::from_iter))
        }
    }

    impl<T, S> Arbitrary for IndexSet<T, S>
    where
        T: Arbitrary + Hash + Eq,
        S: BuildHasher + Default + Clone + 'static,
    {
        fn arbitrary(g: &mut Gen) -> Self {
            Self::from_iter(Vec::arbitrary(g))
        }

        fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
            let vec = Vec::from_iter(self.clone());
            Box::new(vec.shrink().map(Self::from_iter))
        }
    }
}
|
||||
123
third-party/vendor/indexmap/src/borsh.rs
vendored
Normal file
123
third-party/vendor/indexmap/src/borsh.rs
vendored
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
#![cfg_attr(docsrs, doc(cfg(feature = "borsh")))]
|
||||
|
||||
use alloc::vec::Vec;
|
||||
use core::hash::BuildHasher;
|
||||
use core::hash::Hash;
|
||||
use core::iter::ExactSizeIterator;
|
||||
use core::mem::size_of;
|
||||
|
||||
use borsh::error::ERROR_ZST_FORBIDDEN;
|
||||
use borsh::io::{Error, ErrorKind, Read, Result, Write};
|
||||
use borsh::{BorshDeserialize, BorshSerialize};
|
||||
|
||||
use crate::map::IndexMap;
|
||||
use crate::set::IndexSet;
|
||||
|
||||
impl<K, V, S> BorshSerialize for IndexMap<K, V, S>
where
    K: BorshSerialize,
    V: BorshSerialize,
{
    /// Serialize as a `u32` length prefix followed by each (key, value) pair
    /// in the map's iteration order.
    #[inline]
    fn serialize<W: Write>(&self, writer: &mut W) -> Result<()> {
        // Zero-sized key types are rejected up front (see `check_zst`).
        check_zst::<K>()?;

        let iterator = self.iter();

        // The length is written as `u32`; maps longer than `u32::MAX`
        // entries are an InvalidData error rather than a silent truncation.
        u32::try_from(iterator.len())
            .map_err(|_| ErrorKind::InvalidData)?
            .serialize(writer)?;

        for (key, value) in iterator {
            key.serialize(writer)?;
            value.serialize(writer)?;
        }

        Ok(())
    }
}
|
||||
|
||||
impl<K, V, S> BorshDeserialize for IndexMap<K, V, S>
where
    K: BorshDeserialize + Eq + Hash,
    V: BorshDeserialize,
    S: BuildHasher + Default,
{
    /// Deserialize by reading a `Vec<(K, V)>` and collecting it into a map.
    #[inline]
    fn deserialize_reader<R: Read>(reader: &mut R) -> Result<Self> {
        // Zero-sized key types are rejected up front (see `check_zst`).
        check_zst::<K>()?;
        let vec = <Vec<(K, V)>>::deserialize_reader(reader)?;
        Ok(vec.into_iter().collect::<IndexMap<K, V, S>>())
    }
}
|
||||
|
||||
impl<T, S> BorshSerialize for IndexSet<T, S>
where
    T: BorshSerialize,
{
    /// Serialize as a `u32` length prefix followed by each element in the
    /// set's iteration order.
    #[inline]
    fn serialize<W: Write>(&self, writer: &mut W) -> Result<()> {
        // Zero-sized element types are rejected up front (see `check_zst`).
        check_zst::<T>()?;

        let iterator = self.iter();

        // Length written as `u32`; overflow is an InvalidData error.
        u32::try_from(iterator.len())
            .map_err(|_| ErrorKind::InvalidData)?
            .serialize(writer)?;

        for item in iterator {
            item.serialize(writer)?;
        }

        Ok(())
    }
}
|
||||
|
||||
impl<T, S> BorshDeserialize for IndexSet<T, S>
where
    T: BorshDeserialize + Eq + Hash,
    S: BuildHasher + Default,
{
    /// Deserialize by reading a `Vec<T>` and collecting it into a set.
    #[inline]
    fn deserialize_reader<R: Read>(reader: &mut R) -> Result<Self> {
        // Zero-sized element types are rejected up front (see `check_zst`).
        check_zst::<T>()?;
        let vec = <Vec<T>>::deserialize_reader(reader)?;
        Ok(vec.into_iter().collect::<IndexSet<T, S>>())
    }
}
|
||||
|
||||
fn check_zst<T>() -> Result<()> {
|
||||
if size_of::<T>() == 0 {
|
||||
return Err(Error::new(ErrorKind::InvalidData, ERROR_ZST_FORBIDDEN));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod borsh_tests {
    use super::*;

    /// Round-trip an `IndexMap` through borsh and compare for equality.
    #[test]
    fn map_borsh_roundtrip() {
        let original_map: IndexMap<i32, i32> =
            [(1, 2), (3, 4), (5, 6)].into_iter().collect();
        let bytes = borsh::to_vec(&original_map).unwrap();
        let decoded: IndexMap<i32, i32> =
            BorshDeserialize::try_from_slice(&bytes).unwrap();
        assert_eq!(original_map, decoded);
    }

    /// Round-trip an `IndexSet` through borsh and compare for equality.
    #[test]
    fn set_borsh_roundtrip() {
        let original_map: IndexSet<i32> = [1, 2, 3, 4, 5, 6].into_iter().collect();
        let bytes = borsh::to_vec(&original_map).unwrap();
        let decoded: IndexSet<i32> =
            BorshDeserialize::try_from_slice(&bytes).unwrap();
        assert_eq!(original_map, decoded);
    }
}
|
||||
275
third-party/vendor/indexmap/src/lib.rs
vendored
Normal file
275
third-party/vendor/indexmap/src/lib.rs
vendored
Normal file
|
|
@ -0,0 +1,275 @@
|
|||
// We *mostly* avoid unsafe code, but `map::core::raw` allows it to use `RawTable` buckets.
|
||||
#![deny(unsafe_code)]
|
||||
#![warn(rust_2018_idioms)]
|
||||
#![no_std]
|
||||
|
||||
//! [`IndexMap`] is a hash table where the iteration order of the key-value
|
||||
//! pairs is independent of the hash values of the keys.
|
||||
//!
|
||||
//! [`IndexSet`] is a corresponding hash set using the same implementation and
|
||||
//! with similar properties.
|
||||
//!
|
||||
//! ### Highlights
|
||||
//!
|
||||
//! [`IndexMap`] and [`IndexSet`] are drop-in compatible with the std `HashMap`
|
||||
//! and `HashSet`, but they also have some features of note:
|
||||
//!
|
||||
//! - The ordering semantics (see their documentation for details)
|
||||
//! - Sorting methods and the [`.pop()`][IndexMap::pop] methods.
|
||||
//! - The [`Equivalent`] trait, which offers more flexible equality definitions
|
||||
//! between borrowed and owned versions of keys.
|
||||
//! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable
|
||||
//! access to hash map keys.
|
||||
//!
|
||||
//! ### Feature Flags
|
||||
//!
|
||||
//! To reduce the amount of compiled code in the crate by default, certain
|
||||
//! features are gated behind [feature flags]. These allow you to opt in to (or
|
||||
//! out of) functionality. Below is a list of the features available in this
|
||||
//! crate.
|
||||
//!
|
||||
//! * `std`: Enables features which require the Rust standard library. For more
|
||||
//! information see the section on [`no_std`].
|
||||
//! * `rayon`: Enables parallel iteration and other parallel methods.
|
||||
//! * `serde`: Adds implementations for [`Serialize`] and [`Deserialize`]
|
||||
//! to [`IndexMap`] and [`IndexSet`]. Alternative implementations for
|
||||
//! (de)serializing [`IndexMap`] as an ordered sequence are available in the
|
||||
//! [`map::serde_seq`] module.
|
||||
//! * `borsh`: Adds implementations for [`BorshSerialize`] and [`BorshDeserialize`]
|
||||
//! to [`IndexMap`] and [`IndexSet`].
|
||||
//! * `arbitrary`: Adds implementations for the [`arbitrary::Arbitrary`] trait
|
||||
//! to [`IndexMap`] and [`IndexSet`].
|
||||
//! * `quickcheck`: Adds implementations for the [`quickcheck::Arbitrary`] trait
|
||||
//! to [`IndexMap`] and [`IndexSet`].
|
||||
//!
|
||||
//! _Note: only the `std` feature is enabled by default._
|
||||
//!
|
||||
//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
|
||||
//! [`no_std`]: #no-standard-library-targets
|
||||
//! [`Serialize`]: `::serde::Serialize`
|
||||
//! [`Deserialize`]: `::serde::Deserialize`
|
||||
//! [`BorshSerialize`]: `::borsh::BorshSerialize`
|
||||
//! [`BorshDeserialize`]: `::borsh::BorshDeserialize`
|
||||
//! [`arbitrary::Arbitrary`]: `::arbitrary::Arbitrary`
|
||||
//! [`quickcheck::Arbitrary`]: `::quickcheck::Arbitrary`
|
||||
//!
|
||||
//! ### Alternate Hashers
|
||||
//!
|
||||
//! [`IndexMap`] and [`IndexSet`] have a default hasher type
|
||||
//! [`S = RandomState`][std::collections::hash_map::RandomState],
|
||||
//! just like the standard `HashMap` and `HashSet`, which is resistant to
|
||||
//! HashDoS attacks but not the most performant. Type aliases can make it easier
|
||||
//! to use alternate hashers:
|
||||
//!
|
||||
//! ```
|
||||
//! use fnv::FnvBuildHasher;
|
||||
//! use fxhash::FxBuildHasher;
|
||||
//! use indexmap::{IndexMap, IndexSet};
|
||||
//!
|
||||
//! type FnvIndexMap<K, V> = IndexMap<K, V, FnvBuildHasher>;
|
||||
//! type FnvIndexSet<T> = IndexSet<T, FnvBuildHasher>;
|
||||
//!
|
||||
//! type FxIndexMap<K, V> = IndexMap<K, V, FxBuildHasher>;
|
||||
//! type FxIndexSet<T> = IndexSet<T, FxBuildHasher>;
|
||||
//!
|
||||
//! let std: IndexSet<i32> = (0..100).collect();
|
||||
//! let fnv: FnvIndexSet<i32> = (0..100).collect();
|
||||
//! let fx: FxIndexSet<i32> = (0..100).collect();
|
||||
//! assert_eq!(std, fnv);
|
||||
//! assert_eq!(std, fx);
|
||||
//! ```
|
||||
//!
|
||||
//! ### Rust Version
|
||||
//!
|
||||
//! This version of indexmap requires Rust 1.63 or later.
|
||||
//!
|
||||
//! The indexmap 2.x release series will use a carefully considered version
|
||||
//! upgrade policy, where in a later 2.x version, we will raise the minimum
|
||||
//! required Rust version.
|
||||
//!
|
||||
//! ## No Standard Library Targets
|
||||
//!
|
||||
//! This crate supports being built without `std`, requiring `alloc` instead.
|
||||
//! This is chosen by disabling the default "std" cargo feature, by adding
|
||||
//! `default-features = false` to your dependency specification.
|
||||
//!
|
||||
//! - Creating maps and sets using [`new`][IndexMap::new] and
|
||||
//! [`with_capacity`][IndexMap::with_capacity] is unavailable without `std`.
|
||||
//! Use methods [`IndexMap::default`], [`with_hasher`][IndexMap::with_hasher],
|
||||
//! [`with_capacity_and_hasher`][IndexMap::with_capacity_and_hasher] instead.
|
||||
//! A no-std compatible hasher will be needed as well, for example
|
||||
//! from the crate `twox-hash`.
|
||||
//! - Macros [`indexmap!`] and [`indexset!`] are unavailable without `std`.
|
||||
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
|
||||
extern crate alloc;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[macro_use]
|
||||
extern crate std;
|
||||
|
||||
use alloc::vec::{self, Vec};
|
||||
|
||||
mod arbitrary;
|
||||
#[macro_use]
|
||||
mod macros;
|
||||
#[cfg(feature = "borsh")]
|
||||
mod borsh;
|
||||
mod mutable_keys;
|
||||
#[cfg(feature = "serde")]
|
||||
mod serde;
|
||||
mod util;
|
||||
|
||||
pub mod map;
|
||||
pub mod set;
|
||||
|
||||
// Placed after `map` and `set` so new `rayon` methods on the types
|
||||
// are documented after the "normal" methods.
|
||||
#[cfg(feature = "rayon")]
|
||||
mod rayon;
|
||||
|
||||
#[cfg(feature = "rustc-rayon")]
|
||||
mod rustc;
|
||||
|
||||
pub use crate::map::IndexMap;
|
||||
pub use crate::set::IndexSet;
|
||||
pub use equivalent::Equivalent;
|
||||
|
||||
// shared private items
|
||||
|
||||
/// Newtype over a key's cached hash. Stored as `usize` — anything larger
/// isn't used for selecting a position anyway.
#[derive(Clone, Copy, Debug, PartialEq)]
struct HashValue(usize);

impl HashValue {
    /// Widen the stored hash to the `u64` hash-table interfaces expect.
    #[inline(always)]
    fn get(self) -> u64 {
        self.0 as u64
    }
}
|
||||
|
||||
/// One key-value entry together with the cached hash of its key.
#[derive(Copy, Debug)]
struct Bucket<K, V> {
    // Cached so reindexing never has to re-hash the key.
    hash: HashValue,
    key: K,
    value: V,
}

// `Clone` is written by hand so that `clone_from` can reuse the existing
// allocations inside `key`/`value` instead of dropping and re-creating them.
impl<K, V> Clone for Bucket<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        Bucket {
            hash: self.hash,
            key: self.key.clone(),
            value: self.value.clone(),
        }
    }

    fn clone_from(&mut self, other: &Self) {
        self.hash = other.hash;
        self.key.clone_from(&other.key);
        self.value.clone_from(&other.value);
    }
}

impl<K, V> Bucket<K, V> {
    // field accessors -- used for `f` instead of closures in `.map(f)`

    /// Borrow the key.
    fn key_ref(&self) -> &K {
        &self.key
    }
    /// Borrow the value.
    fn value_ref(&self) -> &V {
        &self.value
    }
    /// Mutably borrow the value.
    fn value_mut(&mut self) -> &mut V {
        &mut self.value
    }
    /// Consume the bucket, returning only the key.
    fn key(self) -> K {
        self.key
    }
    /// Consume the bucket, returning only the value.
    fn value(self) -> V {
        self.value
    }
    /// Consume the bucket, returning the (key, value) pair.
    fn key_value(self) -> (K, V) {
        (self.key, self.value)
    }
    /// Borrow both key and value.
    fn refs(&self) -> (&K, &V) {
        (&self.key, &self.value)
    }
    /// Borrow the key, mutably borrow the value.
    fn ref_mut(&mut self) -> (&K, &mut V) {
        (&self.key, &mut self.value)
    }
    /// Mutably borrow both key and value (used by opt-in key mutation).
    fn muts(&mut self) -> (&mut K, &mut V) {
        (&mut self.key, &mut self.value)
    }
}
|
||||
|
||||
/// Internal abstraction over the dense entry storage shared by map and set.
trait Entries {
    type Entry;
    /// Take ownership of the entries vector.
    fn into_entries(self) -> Vec<Self::Entry>;
    /// Borrow the entries as a slice.
    fn as_entries(&self) -> &[Self::Entry];
    /// Mutably borrow the entries as a slice.
    fn as_entries_mut(&mut self) -> &mut [Self::Entry];
    /// Run `f` on the mutable entries; implementors restore any derived
    /// state (e.g. the hash index table) afterwards.
    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]);
}
|
||||
|
||||
/// The error type for [`try_reserve`][IndexMap::try_reserve] methods.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TryReserveError {
    // Kept private so the representation can evolve without a breaking change.
    kind: TryReserveErrorKind,
}

#[derive(Clone, PartialEq, Eq, Debug)]
enum TryReserveErrorKind {
    // The standard library's kind is currently opaque to us, otherwise we could unify this.
    Std(alloc::collections::TryReserveError),
    CapacityOverflow,
    AllocError { layout: alloc::alloc::Layout },
}

// These are not `From` so we don't expose them in our public API.
impl TryReserveError {
    /// Wrap a reservation error from an `alloc` collection (the entries Vec).
    fn from_alloc(error: alloc::collections::TryReserveError) -> Self {
        Self {
            kind: TryReserveErrorKind::Std(error),
        }
    }

    /// Translate a reservation error from the `hashbrown` index table.
    fn from_hashbrown(error: hashbrown::TryReserveError) -> Self {
        Self {
            kind: match error {
                hashbrown::TryReserveError::CapacityOverflow => {
                    TryReserveErrorKind::CapacityOverflow
                }
                hashbrown::TryReserveError::AllocError { layout } => {
                    TryReserveErrorKind::AllocError { layout }
                }
            },
        }
    }
}

impl core::fmt::Display for TryReserveError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let reason = match &self.kind {
            // The std error formats its own complete message; defer to it.
            TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f),
            TryReserveErrorKind::CapacityOverflow => {
                " because the computed capacity exceeded the collection's maximum"
            }
            TryReserveErrorKind::AllocError { .. } => {
                " because the memory allocator returned an error"
            }
        };
        f.write_str("memory allocation failed")?;
        f.write_str(reason)
    }
}

#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl std::error::Error for TryReserveError {}
|
||||
178
third-party/vendor/indexmap/src/macros.rs
vendored
Normal file
178
third-party/vendor/indexmap/src/macros.rs
vendored
Normal file
|
|
@ -0,0 +1,178 @@
|
|||
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
#[macro_export]
/// Create an [`IndexMap`][crate::IndexMap] from a list of key-value pairs
///
/// ## Example
///
/// ```
/// use indexmap::indexmap;
///
/// let map = indexmap!{
///     "a" => 1,
///     "b" => 2,
/// };
/// assert_eq!(map["a"], 1);
/// assert_eq!(map["b"], 2);
/// assert_eq!(map.get("c"), None);
///
/// // "a" is the first key
/// assert_eq!(map.keys().next(), Some(&"a"));
/// ```
macro_rules! indexmap {
    // Trailing-comma form: strip it and recurse into the base rule.
    ($($key:expr => $value:expr,)+) => { $crate::indexmap!($($key => $value),+) };
    ($($key:expr => $value:expr),*) => {
        {
            // Note: `stringify!($key)` is just here to consume the repetition,
            // but we throw away that string literal during constant evaluation.
            const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]);
            let mut map = $crate::IndexMap::with_capacity(CAP);
            $(
                map.insert($key, $value);
            )*
            map
        }
    };
}
|
||||
|
||||
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
#[macro_export]
/// Create an [`IndexSet`][crate::IndexSet] from a list of values
///
/// ## Example
///
/// ```
/// use indexmap::indexset;
///
/// let set = indexset!{
///     "a",
///     "b",
/// };
/// assert!(set.contains("a"));
/// assert!(set.contains("b"));
/// assert!(!set.contains("c"));
///
/// // "a" is the first value
/// assert_eq!(set.iter().next(), Some(&"a"));
/// ```
macro_rules! indexset {
    // Trailing-comma form: strip it and recurse into the base rule.
    ($($value:expr,)+) => { $crate::indexset!($($value),+) };
    ($($value:expr),*) => {
        {
            // Note: `stringify!($value)` is just here to consume the repetition,
            // but we throw away that string literal during constant evaluation.
            const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]);
            let mut set = $crate::IndexSet::with_capacity(CAP);
            $(
                set.insert($value);
            )*
            set
        }
    };
}
|
||||
|
||||
// generate all the Iterator methods by just forwarding to the underlying
// self.iter and mapping its element.
macro_rules! iterator_methods {
    // $map_elt is the mapping function from the underlying iterator's element
    // same mapping function for both options and iterators
    ($map_elt:expr) => {
        fn next(&mut self) -> Option<Self::Item> {
            self.iter.next().map($map_elt)
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            self.iter.size_hint()
        }

        fn count(self) -> usize {
            // The inner iterator knows its exact length, so no need to walk it.
            self.iter.len()
        }

        fn nth(&mut self, n: usize) -> Option<Self::Item> {
            self.iter.nth(n).map($map_elt)
        }

        fn last(mut self) -> Option<Self::Item> {
            // Jump straight to the back instead of exhausting the front.
            self.next_back()
        }

        fn collect<C>(self) -> C
        where
            C: FromIterator<Self::Item>,
        {
            // NB: forwarding this directly to standard iterators will
            // allow it to leverage unstable traits like `TrustedLen`.
            self.iter.map($map_elt).collect()
        }
    };
}
|
||||
|
||||
// generate the `DoubleEndedIterator` methods by forwarding to the underlying
// self.iter and mapping its element.
macro_rules! double_ended_iterator_methods {
    // $map_elt is the mapping function from the underlying iterator's element
    // same mapping function for both options and iterators
    ($map_elt:expr) => {
        fn next_back(&mut self) -> Option<Self::Item> {
            self.iter.next_back().map($map_elt)
        }

        fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
            self.iter.nth_back(n).map($map_elt)
        }
    };
}
|
||||
|
||||
// generate `ParallelIterator` methods by just forwarding to the underlying
// self.entries and mapping its elements.
#[cfg(any(feature = "rayon", feature = "rustc-rayon"))]
macro_rules! parallel_iterator_methods {
    // $map_elt is the mapping function from the underlying iterator's element
    ($map_elt:expr) => {
        fn drive_unindexed<C>(self, consumer: C) -> C::Result
        where
            C: UnindexedConsumer<Self::Item>,
        {
            self.entries
                .into_par_iter()
                .map($map_elt)
                .drive_unindexed(consumer)
        }

        // NB: This allows indexed collection, e.g. directly into a `Vec`, but the
        // underlying iterator must really be indexed. We should remove this if we
        // start having tombstones that must be filtered out.
        fn opt_len(&self) -> Option<usize> {
            Some(self.entries.len())
        }
    };
}
|
||||
|
||||
// generate `IndexedParallelIterator` methods by just forwarding to the underlying
// self.entries and mapping its elements.
#[cfg(any(feature = "rayon", feature = "rustc-rayon"))]
macro_rules! indexed_parallel_iterator_methods {
    // $map_elt is the mapping function from the underlying iterator's element
    ($map_elt:expr) => {
        fn drive<C>(self, consumer: C) -> C::Result
        where
            C: Consumer<Self::Item>,
        {
            self.entries.into_par_iter().map($map_elt).drive(consumer)
        }

        fn len(&self) -> usize {
            self.entries.len()
        }

        fn with_producer<CB>(self, callback: CB) -> CB::Output
        where
            CB: ProducerCallback<Self::Item>,
        {
            self.entries
                .into_par_iter()
                .map($map_elt)
                .with_producer(callback)
        }
    };
}
|
||||
1423
third-party/vendor/indexmap/src/map.rs
vendored
Normal file
1423
third-party/vendor/indexmap/src/map.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
642
third-party/vendor/indexmap/src/map/core.rs
vendored
Normal file
642
third-party/vendor/indexmap/src/map/core.rs
vendored
Normal file
|
|
@ -0,0 +1,642 @@
|
|||
//! This is the core implementation that doesn't depend on the hasher at all.
|
||||
//!
|
||||
//! The methods of `IndexMapCore` don't use any Hash properties of K.
|
||||
//!
|
||||
//! It's cleaner to separate them out, then the compiler checks that we are not
|
||||
//! using Hash at all in these methods.
|
||||
//!
|
||||
//! However, we should probably not let this show in the public API or docs.
|
||||
|
||||
mod entry;
|
||||
mod raw;
|
||||
|
||||
pub mod raw_entry_v1;
|
||||
|
||||
use hashbrown::raw::RawTable;
|
||||
|
||||
use crate::vec::{self, Vec};
|
||||
use crate::TryReserveError;
|
||||
use core::mem;
|
||||
use core::ops::RangeBounds;
|
||||
|
||||
use crate::util::simplify_range;
|
||||
use crate::{Bucket, Entries, Equivalent, HashValue};
|
||||
|
||||
pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};
|
||||
|
||||
/// Core of the map that does not depend on the hasher type `S`.
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: RawTable<usize>,
    /// entries is a dense vec of entries in their order.
    entries: Vec<Bucket<K, V>>,
}
|
||||
|
||||
#[inline(always)]
|
||||
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
|
||||
move |&i| entries[i].hash.get()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
|
||||
key: &'a Q,
|
||||
entries: &'a [Bucket<K, V>],
|
||||
) -> impl Fn(&usize) -> bool + 'a {
|
||||
move |&i| Q::equivalent(key, &entries[i].key)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
|
||||
let erased = table.erase_entry(hash.get(), move |&i| i == index);
|
||||
debug_assert!(erased);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
|
||||
let index = table
|
||||
.get_mut(hash.get(), move |&i| i == old)
|
||||
.expect("index not found");
|
||||
*index = new;
|
||||
}
|
||||
|
||||
impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        // Delegate to `clone_from` so both paths share one implementation.
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        // The index table is cloned using the cached hashes from `other`'s
        // entries, so no key is ever re-hashed.
        let hasher = get_hash(&other.entries);
        self.indices.clone_from_with_hasher(&other.indices, hasher);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.reserve_entries(additional);
        }
        // `Vec::clone_from` reuses existing entry allocations where it can.
        self.entries.clone_from(&other.entries);
    }
}
|
||||
|
||||
// Debug output including the raw index table, only for internal testing
// (gated behind the `test_debug` feature).
#[cfg(feature = "test_debug")]
impl<K, V> core::fmt::Debug for IndexMapCore<K, V>
where
    K: core::fmt::Debug,
    V: core::fmt::Debug,
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("IndexMapCore")
            .field("indices", &raw::DebugIndices(&self.indices))
            .field("entries", &self.entries)
            .finish()
    }
}
|
||||
|
||||
impl<K, V> Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
        // `f` may have reordered or mutated the entries, so the hash-to-index
        // table must be rebuilt to stay consistent.
        self.rebuild_hash_table();
    }
}
|
||||
|
||||
impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    /// Create an empty core without allocating.
    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: RawTable::new(),
            entries: Vec::new(),
        }
    }

    /// Create a core with capacity for `n` key-value pairs.
    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: RawTable::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    /// Return the number of key-value pairs.
    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    /// Return the effective capacity: the smaller of the hash-table
    /// capacity and the entries-vector capacity.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    /// Remove all pairs, keeping allocated capacity.
    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    /// Shorten to the first `len` pairs; no-op if already that short.
    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    /// Drain the pairs in `range`; later pairs shift down as the returned
    /// `Vec::drain` iterator is consumed/dropped.
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        // Fix up the hash table first, before `entries` is handed to `Vec::drain`.
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    /// Parallel counterpart of [`drain`][Self::drain] using rayon.
    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    /// Split off the pairs at `at..` into a new core, leaving `..at` in `self`.
    ///
    /// ***Panics*** if `at` is out of bounds.
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        // Everything from `at` on leaves `self`'s hash table.
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        // Build a fresh table for the split-off entries.
        let mut indices = RawTable::with_capacity(entries.len());
        raw::insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    /// Split into three parts: `self` keeps `..start`, the pairs in `range`
    /// are returned as an owning iterator, and the tail becomes a new core.
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        // Both the drained range and the tail leave `self`'s hash table.
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = RawTable::with_capacity(entries.len());
        raw::insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        raw::insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        // `other.entries` was emptied by `append`; clear its table to match.
        other.indices.clear();
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.reserve_entries(additional);
        }
    }

    /// Reserve entries capacity, rounded up to match the indices
    fn reserve_entries(&mut self, additional: usize) {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting panic.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        self.entries.reserve_exact(additional);
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            // The popped pair lived at the (post-pop) length index.
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Append a key-value pair to `entries`, *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    fn insert_entry(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.get(hash.get(), eq).copied()
    }

    /// Insert a pair, replacing the value of an equivalent key if present;
    /// returns the index and the previous value, if any.
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        match self.find_or_insert(hash, &key) {
            Ok(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))),
            Err(i) => {
                // A fresh index always points at the end of `entries`.
                debug_assert_eq!(i, self.entries.len());
                self.push_entry(hash, key, value);
                (i, None)
            }
        }
    }

    /// Same as `insert_full`, except it also replaces the key
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        match self.find_or_insert(hash, &key) {
            Ok(i) => {
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            Err(i) => {
                debug_assert_eq!(i, self.entries.len());
                self.push_entry(hash, key, value);
                (i, None)
            }
        }
    }

    /// Append a pair at the end, *without* checking for an equivalent key;
    /// returns its new index.
    fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> usize {
        let i = self.indices.len();
        self.indices.insert(hash.get(), i, get_hash(&self.entries));
        debug_assert_eq!(i, self.entries.len());
        self.push_entry(hash, key, value);
        i
    }

    /// Insert a pair at `index`, shifting later pairs up, *without*
    /// checking for an equivalent key.
    ///
    /// ***Panics*** if `index` is out of bounds.
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        self.insert_entry(index, hash, key, value);
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.shift_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shift all indices in range.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shift all indices in range.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }

    /// Move the pair at `from` to `to`, shifting all pairs in between.
    ///
    /// ***Panics*** if either index is out of bounds (via the indexing here).
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(&mut self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(&mut self.indices, from_hash, usize::MAX, to);
        }
    }

    /// Swap the pairs at indices `a` and `b`.
    ///
    /// ***Panics*** if either index is out of bounds.
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // We'll get a "nice" bounds-check from indexing `self.entries`,
        // and then we expect to find it in the table as well.
        let [ref_a, ref_b] = self
            .indices
            .get_many_mut(
                [self.entries[a].hash.get(), self.entries[b].hash.get()],
                move |i, &x| if i == 0 { x == a } else { x == b },
            )
            .expect("indices not found");

        mem::swap(ref_a, ref_b);
        self.entries.swap(a, b);
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.swap_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.buckets() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            raw::insert_bulk_no_grow(&mut self.indices, start_entries);
            raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            self.erase_indices_sweep(start, end);
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    /// Keep only the pairs for which `keep` returns `true`, in order.
    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        // Only rebuild the table if anything was actually removed.
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    /// Re-derive `indices` from scratch out of the current `entries`.
    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    /// Reverse the order of the pairs in place.
    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, can easily calculate what they should
        // be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in self.indices_mut() {
            *i = len - *i - 1;
        }
    }
}
|
||||
|
||||
#[test]
fn assert_send_sync() {
    // Purely a compile-time check: these types must be Send + Sync.
    fn check<T: Send + Sync>() {}
    check::<IndexMapCore<i32, i32>>();
    check::<Entry<'_, i32, i32>>();
    check::<IndexedEntry<'_, i32, i32>>();
}
|
||||
481
third-party/vendor/indexmap/src/map/core/entry.rs
vendored
Normal file
481
third-party/vendor/indexmap/src/map/core/entry.rs
vendored
Normal file
|
|
@ -0,0 +1,481 @@
|
|||
use super::raw::RawTableEntry;
|
||||
use super::IndexMapCore;
|
||||
use crate::HashValue;
|
||||
use core::{fmt, mem};
|
||||
|
||||
impl<K, V> IndexMapCore<K, V> {
    /// Look up `key` by its precomputed `hash` and return the corresponding
    /// occupied or vacant [`Entry`].
    pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V>
    where
        K: Eq,
    {
        match self.raw_entry(hash, |k| *k == key) {
            // An equal key exists: wrap its raw table slot.
            Ok(raw) => Entry::Occupied(OccupiedEntry { raw }),
            // No equal key: keep the key so `VacantEntry` can insert it later.
            Err(map) => Entry::Vacant(VacantEntry { map, hash, key }),
        }
    }
}
|
||||
|
||||
/// Entry for an existing key-value pair in an [`IndexMap`][crate::IndexMap]
/// or a vacant location to insert one.
// Both variants hold an exclusive borrow of the map core, so the looked-up
// position remains valid for the entry's lifetime.
pub enum Entry<'a, K, V> {
    /// Existing slot with equivalent key.
    Occupied(OccupiedEntry<'a, K, V>),
    /// Vacant slot (no equivalent key in the map).
    Vacant(VacantEntry<'a, K, V>),
}
|
||||
|
||||
impl<'a, K, V> Entry<'a, K, V> {
|
||||
/// Return the index where the key-value pair exists or will be inserted.
|
||||
pub fn index(&self) -> usize {
|
||||
match *self {
|
||||
Entry::Occupied(ref entry) => entry.index(),
|
||||
Entry::Vacant(ref entry) => entry.index(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts the given default value in the entry if it is vacant and returns a mutable
|
||||
/// reference to it. Otherwise a mutable reference to an already existent value is returned.
|
||||
///
|
||||
/// Computes in **O(1)** time (amortized average).
|
||||
pub fn or_insert(self, default: V) -> &'a mut V {
|
||||
match self {
|
||||
Entry::Occupied(entry) => entry.into_mut(),
|
||||
Entry::Vacant(entry) => entry.insert(default),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable
|
||||
/// reference to it. Otherwise a mutable reference to an already existent value is returned.
|
||||
///
|
||||
/// Computes in **O(1)** time (amortized average).
|
||||
pub fn or_insert_with<F>(self, call: F) -> &'a mut V
|
||||
where
|
||||
F: FnOnce() -> V,
|
||||
{
|
||||
match self {
|
||||
Entry::Occupied(entry) => entry.into_mut(),
|
||||
Entry::Vacant(entry) => entry.insert(call()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts the result of the `call` function with a reference to the entry's key if it is
|
||||
/// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to
|
||||
/// an already existent value is returned.
|
||||
///
|
||||
/// Computes in **O(1)** time (amortized average).
|
||||
pub fn or_insert_with_key<F>(self, call: F) -> &'a mut V
|
||||
where
|
||||
F: FnOnce(&K) -> V,
|
||||
{
|
||||
match self {
|
||||
Entry::Occupied(entry) => entry.into_mut(),
|
||||
Entry::Vacant(entry) => {
|
||||
let value = call(&entry.key);
|
||||
entry.insert(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets a reference to the entry's key, either within the map if occupied,
|
||||
/// or else the new key that was used to find the entry.
|
||||
pub fn key(&self) -> &K {
|
||||
match *self {
|
||||
Entry::Occupied(ref entry) => entry.key(),
|
||||
Entry::Vacant(ref entry) => entry.key(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Modifies the entry if it is occupied.
|
||||
pub fn and_modify<F>(mut self, f: F) -> Self
|
||||
where
|
||||
F: FnOnce(&mut V),
|
||||
{
|
||||
if let Entry::Occupied(entry) = &mut self {
|
||||
f(entry.get_mut());
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
|
||||
/// reference to it. Otherwise a mutable reference to an already existent value is returned.
|
||||
///
|
||||
/// Computes in **O(1)** time (amortized average).
|
||||
pub fn or_default(self) -> &'a mut V
|
||||
where
|
||||
V: Default,
|
||||
{
|
||||
match self {
|
||||
Entry::Occupied(entry) => entry.into_mut(),
|
||||
Entry::Vacant(entry) => entry.insert(V::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut tuple = f.debug_tuple("Entry");
|
||||
match self {
|
||||
Entry::Vacant(v) => tuple.field(v),
|
||||
Entry::Occupied(o) => tuple.field(o),
|
||||
};
|
||||
tuple.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A view into an occupied entry in an [`IndexMap`][crate::IndexMap].
/// It is part of the [`Entry`] enum.
pub struct OccupiedEntry<'a, K, V> {
    // The found slot in the hash table; also carries the `&mut` map core
    // borrow (it can be recovered via `remove_index`/`into_inner`).
    raw: RawTableEntry<'a, K, V>,
}
|
||||
|
||||
impl<'a, K, V> OccupiedEntry<'a, K, V> {
    /// Return the index of the key-value pair
    #[inline]
    pub fn index(&self) -> usize {
        self.raw.index()
    }

    /// Gets a reference to the entry's key in the map.
    ///
    /// Note that this is not the key that was used to find the entry. There may be an observable
    /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
    /// extra fields or the memory address of an allocation.
    pub fn key(&self) -> &K {
        &self.raw.bucket().key
    }

    /// Gets a reference to the entry's value in the map.
    pub fn get(&self) -> &V {
        &self.raw.bucket().value
    }

    /// Gets a mutable reference to the entry's value in the map.
    ///
    /// If you need a reference which may outlive the destruction of the
    /// [`Entry`] value, see [`into_mut`][Self::into_mut].
    pub fn get_mut(&mut self) -> &mut V {
        &mut self.raw.bucket_mut().value
    }

    /// Converts into a mutable reference to the entry's value in the map,
    /// with a lifetime bound to the map itself.
    pub fn into_mut(self) -> &'a mut V {
        &mut self.raw.into_bucket().value
    }

    /// Sets the value of the entry to `value`, and returns the entry's old value.
    pub fn insert(&mut self, value: V) -> V {
        mem::replace(self.get_mut(), value)
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// **NOTE:** This is equivalent to [`.swap_remove()`][Self::swap_remove], replacing this
    /// entry's position with the last element, and it is deprecated in favor of calling that
    /// explicitly. If you need to preserve the relative order of the keys in the map, use
    /// [`.shift_remove()`][Self::shift_remove] instead.
    #[deprecated(note = "`remove` disrupts the map order -- \
        use `swap_remove` or `shift_remove` for explicit behavior.")]
    pub fn remove(self) -> V {
        self.swap_remove()
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with
    /// the last element of the map and popping it off.
    /// **This perturbs the position of what used to be the last element!**
    ///
    /// Computes in **O(1)** time (average).
    pub fn swap_remove(self) -> V {
        self.swap_remove_entry().1
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the
    /// elements that follow it, preserving their relative order.
    /// **This perturbs the index of all of those elements!**
    ///
    /// Computes in **O(n)** time (average).
    pub fn shift_remove(self) -> V {
        self.shift_remove_entry().1
    }

    /// Remove and return the key, value pair stored in the map for this entry
    ///
    /// **NOTE:** This is equivalent to [`.swap_remove_entry()`][Self::swap_remove_entry],
    /// replacing this entry's position with the last element, and it is deprecated in favor of
    /// calling that explicitly. If you need to preserve the relative order of the keys in the map,
    /// use [`.shift_remove_entry()`][Self::shift_remove_entry] instead.
    #[deprecated(note = "`remove_entry` disrupts the map order -- \
        use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")]
    pub fn remove_entry(self) -> (K, V) {
        self.swap_remove_entry()
    }

    /// Remove and return the key, value pair stored in the map for this entry
    ///
    /// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with
    /// the last element of the map and popping it off.
    /// **This perturbs the position of what used to be the last element!**
    ///
    /// Computes in **O(1)** time (average).
    pub fn swap_remove_entry(self) -> (K, V) {
        // Remove the slot from the hash table first, then let the map core
        // fix up `entries` (which expects the index already erased).
        let (map, index) = self.raw.remove_index();
        map.swap_remove_finish(index)
    }

    /// Remove and return the key, value pair stored in the map for this entry
    ///
    /// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the
    /// elements that follow it, preserving their relative order.
    /// **This perturbs the index of all of those elements!**
    ///
    /// Computes in **O(n)** time (average).
    pub fn shift_remove_entry(self) -> (K, V) {
        // Same two-step removal as `swap_remove_entry`, but order-preserving.
        let (map, index) = self.raw.remove_index();
        map.shift_remove_finish(index)
    }

    /// Moves the position of the entry to a new index
    /// by shifting all other entries in-between.
    ///
    /// This is equivalent to [`IndexMap::move_index`][`crate::IndexMap::move_index`]
    /// coming `from` the current [`.index()`][Self::index].
    ///
    /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up.
    /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down.
    ///
    /// ***Panics*** if `to` is out of bounds.
    ///
    /// Computes in **O(n)** time (average).
    pub fn move_index(self, to: usize) {
        let (map, index) = self.raw.into_inner();
        map.move_index(index, to);
    }

    /// Swaps the position of entry with another.
    ///
    /// This is equivalent to [`IndexMap::swap_indices`][`crate::IndexMap::swap_indices`]
    /// with the current [`.index()`][Self::index] as one of the two being swapped.
    ///
    /// ***Panics*** if the `other` index is out of bounds.
    ///
    /// Computes in **O(1)** time (average).
    pub fn swap_indices(self, other: usize) {
        let (map, index) = self.raw.into_inner();
        map.swap_indices(index, other)
    }
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("OccupiedEntry")
|
||||
.field("key", self.key())
|
||||
.field("value", self.get())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A view into a vacant entry in an [`IndexMap`][crate::IndexMap].
/// It is part of the [`Entry`] enum.
pub struct VacantEntry<'a, K, V> {
    // Exclusive borrow of the map core, so the vacancy can't be filled elsewhere.
    map: &'a mut IndexMapCore<K, V>,
    // Precomputed hash of `key`, reused on insertion.
    hash: HashValue,
    // The key that was searched for, ready to be inserted.
    key: K,
}
|
||||
|
||||
impl<'a, K, V> VacantEntry<'a, K, V> {
    /// Return the index where a key-value pair may be inserted.
    pub fn index(&self) -> usize {
        // Plain insertion appends, so the prospective index is the current length.
        self.map.indices.len()
    }

    /// Gets a reference to the key that was used to find the entry.
    pub fn key(&self) -> &K {
        &self.key
    }

    /// Takes ownership of the key, leaving the entry vacant.
    pub fn into_key(self) -> K {
        self.key
    }

    /// Inserts the entry's key and the given value into the map, and returns a mutable reference
    /// to the value.
    pub fn insert(self, value: V) -> &'a mut V {
        let Self { map, hash, key } = self;
        let i = map.insert_unique(hash, key, value);
        &mut map.entries[i].value
    }

    /// Inserts the entry's key and the given value into the map at its ordered
    /// position among sorted keys, and returns the new index and a mutable
    /// reference to the value.
    ///
    /// If the existing keys are **not** already sorted, then the insertion
    /// index is unspecified (like [`slice::binary_search`]), but the key-value
    /// pair is inserted at that position regardless.
    ///
    /// Computes in **O(n)** time (average).
    pub fn insert_sorted(self, value: V) -> (usize, &'a mut V)
    where
        K: Ord,
    {
        // `unwrap_err` is sound: a `VacantEntry` is only constructed when the
        // key is absent, so the binary search cannot find an equal key.
        let slice = crate::map::Slice::from_slice(&self.map.entries);
        let i = slice.binary_search_keys(&self.key).unwrap_err();
        (i, self.shift_insert(i, value))
    }

    /// Inserts the entry's key and the given value into the map at the given index,
    /// shifting others to the right, and returns a mutable reference to the value.
    ///
    /// ***Panics*** if `index` is out of bounds.
    ///
    /// Computes in **O(n)** time (average).
    pub fn shift_insert(self, index: usize, value: V) -> &'a mut V {
        let Self { map, hash, key } = self;
        map.shift_insert_unique(index, hash, key, value);
        &mut map.entries[index].value
    }
}
|
||||
|
||||
impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_tuple("VacantEntry").field(self.key()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A view into an occupied entry in an [`IndexMap`][crate::IndexMap] obtained by index.
///
/// This `struct` is created from the [`get_index_entry`][crate::IndexMap::get_index_entry] method.
pub struct IndexedEntry<'a, K, V> {
    // Exclusive access to the map core for reading and mutating the entry.
    map: &'a mut IndexMapCore<K, V>,
    // We have a mutable reference to the map, which keeps the index
    // valid and pointing to the correct entry.
    index: usize,
}
|
||||
|
||||
impl<'a, K, V> IndexedEntry<'a, K, V> {
|
||||
pub(crate) fn new(map: &'a mut IndexMapCore<K, V>, index: usize) -> Self {
|
||||
Self { map, index }
|
||||
}
|
||||
|
||||
/// Return the index of the key-value pair
|
||||
#[inline]
|
||||
pub fn index(&self) -> usize {
|
||||
self.index
|
||||
}
|
||||
|
||||
/// Gets a reference to the entry's key in the map.
|
||||
pub fn key(&self) -> &K {
|
||||
&self.map.entries[self.index].key
|
||||
}
|
||||
|
||||
/// Gets a reference to the entry's value in the map.
|
||||
pub fn get(&self) -> &V {
|
||||
&self.map.entries[self.index].value
|
||||
}
|
||||
|
||||
/// Gets a mutable reference to the entry's value in the map.
|
||||
///
|
||||
/// If you need a reference which may outlive the destruction of the
|
||||
/// `IndexedEntry` value, see [`into_mut`][Self::into_mut].
|
||||
pub fn get_mut(&mut self) -> &mut V {
|
||||
&mut self.map.entries[self.index].value
|
||||
}
|
||||
|
||||
/// Sets the value of the entry to `value`, and returns the entry's old value.
|
||||
pub fn insert(&mut self, value: V) -> V {
|
||||
mem::replace(self.get_mut(), value)
|
||||
}
|
||||
|
||||
/// Converts into a mutable reference to the entry's value in the map,
|
||||
/// with a lifetime bound to the map itself.
|
||||
pub fn into_mut(self) -> &'a mut V {
|
||||
&mut self.map.entries[self.index].value
|
||||
}
|
||||
|
||||
/// Remove and return the key, value pair stored in the map for this entry
|
||||
///
|
||||
/// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with
|
||||
/// the last element of the map and popping it off.
|
||||
/// **This perturbs the position of what used to be the last element!**
|
||||
///
|
||||
/// Computes in **O(1)** time (average).
|
||||
pub fn swap_remove_entry(self) -> (K, V) {
|
||||
self.map.swap_remove_index(self.index).unwrap()
|
||||
}
|
||||
|
||||
/// Remove and return the key, value pair stored in the map for this entry
|
||||
///
|
||||
/// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the
|
||||
/// elements that follow it, preserving their relative order.
|
||||
/// **This perturbs the index of all of those elements!**
|
||||
///
|
||||
/// Computes in **O(n)** time (average).
|
||||
pub fn shift_remove_entry(self) -> (K, V) {
|
||||
self.map.shift_remove_index(self.index).unwrap()
|
||||
}
|
||||
|
||||
/// Remove the key, value pair stored in the map for this entry, and return the value.
|
||||
///
|
||||
/// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with
|
||||
/// the last element of the map and popping it off.
|
||||
/// **This perturbs the position of what used to be the last element!**
|
||||
///
|
||||
/// Computes in **O(1)** time (average).
|
||||
pub fn swap_remove(self) -> V {
|
||||
self.swap_remove_entry().1
|
||||
}
|
||||
|
||||
/// Remove the key, value pair stored in the map for this entry, and return the value.
|
||||
///
|
||||
/// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the
|
||||
/// elements that follow it, preserving their relative order.
|
||||
/// **This perturbs the index of all of those elements!**
|
||||
///
|
||||
/// Computes in **O(n)** time (average).
|
||||
pub fn shift_remove(self) -> V {
|
||||
self.shift_remove_entry().1
|
||||
}
|
||||
|
||||
/// Moves the position of the entry to a new index
|
||||
/// by shifting all other entries in-between.
|
||||
///
|
||||
/// This is equivalent to [`IndexMap::move_index`][`crate::IndexMap::move_index`]
|
||||
/// coming `from` the current [`.index()`][Self::index].
|
||||
///
|
||||
/// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up.
|
||||
/// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down.
|
||||
///
|
||||
/// ***Panics*** if `to` is out of bounds.
|
||||
///
|
||||
/// Computes in **O(n)** time (average).
|
||||
pub fn move_index(self, to: usize) {
|
||||
self.map.move_index(self.index, to);
|
||||
}
|
||||
|
||||
/// Swaps the position of entry with another.
|
||||
///
|
||||
/// This is equivalent to [`IndexMap::swap_indices`][`crate::IndexMap::swap_indices`]
|
||||
/// with the current [`.index()`][Self::index] as one of the two being swapped.
|
||||
///
|
||||
/// ***Panics*** if the `other` index is out of bounds.
|
||||
///
|
||||
/// Computes in **O(1)** time (average).
|
||||
pub fn swap_indices(self, other: usize) {
|
||||
self.map.swap_indices(self.index, other)
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IndexedEntry<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("IndexedEntry")
|
||||
.field("index", &self.index)
|
||||
.field("key", self.key())
|
||||
.field("value", self.get())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
153
third-party/vendor/indexmap/src/map/core/raw.rs
vendored
Normal file
153
third-party/vendor/indexmap/src/map/core/raw.rs
vendored
Normal file
|
|
@ -0,0 +1,153 @@
|
|||
#![allow(unsafe_code)]
|
||||
//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`,
|
||||
//! mostly in dealing with its bucket "pointers".
|
||||
|
||||
use super::{equivalent, get_hash, Bucket, HashValue, IndexMapCore};
|
||||
use hashbrown::raw::RawTable;
|
||||
|
||||
type RawBucket = hashbrown::raw::Bucket<usize>;
|
||||
|
||||
/// Inserts many entries into a raw table without reallocating.
///
/// ***Panics*** if there is not sufficient capacity already.
pub(super) fn insert_bulk_no_grow<K, V>(indices: &mut RawTable<usize>, entries: &[Bucket<K, V>]) {
    // Verify up front that the table can absorb every entry: `insert_no_grow`
    // below requires that no reallocation can occur.
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
        // SAFETY: we asserted that sufficient capacity exists for all entries.
        unsafe {
            // Each entry maps its hash to the next sequential index --
            // entries are assumed to be appended in the same order.
            indices.insert_no_grow(entry.hash.get(), indices.len());
        }
    }
}
|
||||
|
||||
// Test-only helper that prints the raw hash-table index contents as a list.
#[cfg(feature = "test_debug")]
pub(super) struct DebugIndices<'a>(pub &'a RawTable<usize>);

#[cfg(feature = "test_debug")]
impl core::fmt::Debug for DebugIndices<'_> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // SAFETY: we're not letting any of the buckets escape this function
        let indices = unsafe { self.0.iter().map(|raw_bucket| *raw_bucket.as_ref()) };
        f.debug_list().entries(indices).finish()
    }
}
|
||||
|
||||
impl<K, V> IndexMapCore<K, V> {
    /// Sweep the whole table to erase indices start..end
    pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) {
        // SAFETY: we're not letting any of the buckets escape this function
        unsafe {
            let offset = end - start;
            for bucket in self.indices.iter() {
                let i = bucket.as_mut();
                if *i >= end {
                    // Indices past the erased range shift down by the number
                    // of removed pairs.
                    *i -= offset;
                } else if *i >= start {
                    // Index falls inside the erased range: drop it from the table.
                    self.indices.erase(bucket);
                }
            }
        }
    }

    /// Search for a key in the table and return `Ok(entry_index)` if found.
    /// Otherwise, insert the key and return `Err(new_index)`.
    ///
    /// Note that hashbrown may resize the table to reserve space for insertion,
    /// even before checking if it's already present, so this is somewhat biased
    /// towards new items.
    pub(crate) fn find_or_insert(&mut self, hash: HashValue, key: &K) -> Result<usize, usize>
    where
        K: Eq,
    {
        let hash = hash.get();
        // Equality test resolving a stored index to its entry's key.
        let eq = equivalent(key, &self.entries);
        // Re-hash function, used if the table must grow/rehash on insert.
        let hasher = get_hash(&self.entries);
        // SAFETY: We're not mutating between find and read/insert.
        unsafe {
            match self.indices.find_or_find_insert_slot(hash, eq, hasher) {
                Ok(raw_bucket) => Ok(*raw_bucket.as_ref()),
                Err(slot) => {
                    // New entries are appended, so the new index is the
                    // current length.
                    let index = self.indices.len();
                    self.indices.insert_in_slot(hash, slot, index);
                    Err(index)
                }
            }
        }
    }

    // Find the table bucket whose entry matches `hash`/`is_match`, returning
    // an occupied view on success or the plain `&mut self` on a miss.
    pub(super) fn raw_entry(
        &mut self,
        hash: HashValue,
        mut is_match: impl FnMut(&K) -> bool,
    ) -> Result<RawTableEntry<'_, K, V>, &mut Self> {
        let entries = &*self.entries;
        let eq = move |&i: &usize| is_match(&entries[i].key);
        match self.indices.find(hash.get(), eq) {
            // SAFETY: The entry is created with a live raw bucket, at the same time
            // we have a &mut reference to the map, so it can not be modified further.
            Some(raw_bucket) => Ok(RawTableEntry {
                map: self,
                raw_bucket,
            }),
            None => Err(self),
        }
    }

    // Mutably iterate every stored index (e.g. to fix indices after a reorder).
    pub(super) fn indices_mut(&mut self) -> impl Iterator<Item = &mut usize> {
        // SAFETY: we're not letting any of the buckets escape this function,
        // only the item references that are appropriately bound to `&mut self`.
        unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) }
    }
}
|
||||
|
||||
/// A view into an occupied raw entry in an `IndexMap`.
// SAFETY: The lifetime of the map reference also constrains the raw bucket,
// which is essentially a raw pointer into the map indices.
pub(super) struct RawTableEntry<'a, K, V> {
    // Exclusive map access; also keeps the raw bucket alive and stable.
    map: &'a mut IndexMapCore<K, V>,
    // Bucket in `map.indices` that holds this entry's storage index.
    raw_bucket: RawBucket,
}

// `hashbrown::raw::Bucket` is only `Send`, not `Sync`.
// SAFETY: `&self` only accesses the bucket to read it.
unsafe impl<K: Sync, V: Sync> Sync for RawTableEntry<'_, K, V> {}
|
||||
|
||||
impl<'a, K, V> RawTableEntry<'a, K, V> {
    /// Return the index of the key-value pair
    #[inline]
    pub(super) fn index(&self) -> usize {
        // SAFETY: we have `&mut map` keeping the bucket stable
        unsafe { *self.raw_bucket.as_ref() }
    }

    // Borrow the key-value entry this bucket points at.
    #[inline]
    pub(super) fn bucket(&self) -> &Bucket<K, V> {
        &self.map.entries[self.index()]
    }

    // Mutably borrow the key-value entry this bucket points at.
    #[inline]
    pub(super) fn bucket_mut(&mut self) -> &mut Bucket<K, V> {
        let index = self.index();
        &mut self.map.entries[index]
    }

    // Consume the view and return the entry with the map's full lifetime.
    #[inline]
    pub(super) fn into_bucket(self) -> &'a mut Bucket<K, V> {
        let index = self.index();
        &mut self.map.entries[index]
    }

    /// Remove the index from indices, leaving the actual entries to the caller.
    pub(super) fn remove_index(self) -> (&'a mut IndexMapCore<K, V>, usize) {
        // SAFETY: This is safe because it can only happen once (self is consumed)
        // and map.indices have not been modified since entry construction
        let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
        (self.map, index)
    }

    /// Take no action, just return the index and the original map reference.
    pub(super) fn into_inner(self) -> (&'a mut IndexMapCore<K, V>, usize) {
        let index = self.index();
        (self.map, index)
    }
}
|
||||
652
third-party/vendor/indexmap/src/map/core/raw_entry_v1.rs
vendored
Normal file
652
third-party/vendor/indexmap/src/map/core/raw_entry_v1.rs
vendored
Normal file
|
|
@ -0,0 +1,652 @@
|
|||
//! Opt-in access to the experimental raw entry API.
|
||||
//!
|
||||
//! This module is designed to mimic the raw entry API of [`HashMap`][std::collections::hash_map],
|
||||
//! matching its unstable state as of Rust 1.75. See the tracking issue
|
||||
//! [rust#56167](https://github.com/rust-lang/rust/issues/56167) for more details.
|
||||
//!
|
||||
//! The trait [`RawEntryApiV1`] and the `_v1` suffix on its methods are meant to insulate this for
|
||||
//! the future, in case later breaking changes are needed. If the standard library stabilizes its
|
||||
//! `hash_raw_entry` feature (or some replacement), matching *inherent* methods will be added to
|
||||
//! `IndexMap` without such an opt-in trait.
|
||||
|
||||
use super::raw::RawTableEntry;
|
||||
use super::IndexMapCore;
|
||||
use crate::{Equivalent, HashValue, IndexMap};
|
||||
use core::fmt;
|
||||
use core::hash::{BuildHasher, Hash, Hasher};
|
||||
use core::marker::PhantomData;
|
||||
use core::mem;
|
||||
|
||||
/// Opt-in access to the experimental raw entry API.
|
||||
///
|
||||
/// See the [`raw_entry_v1`][self] module documentation for more information.
|
||||
pub trait RawEntryApiV1<K, V, S>: private::Sealed {
|
||||
/// Creates a raw immutable entry builder for the [`IndexMap`].
|
||||
///
|
||||
/// Raw entries provide the lowest level of control for searching and
|
||||
/// manipulating a map. They must be manually initialized with a hash and
|
||||
/// then manually searched.
|
||||
///
|
||||
/// This is useful for
|
||||
/// * Hash memoization
|
||||
/// * Using a search key that doesn't work with the [`Equivalent`] trait
|
||||
/// * Using custom comparison logic without newtype wrappers
|
||||
///
|
||||
/// Unless you are in such a situation, higher-level and more foolproof APIs like
|
||||
/// [`get`][IndexMap::get] should be preferred.
|
||||
///
|
||||
/// Immutable raw entries have very limited use; you might instead want
|
||||
/// [`raw_entry_mut_v1`][Self::raw_entry_mut_v1].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use core::hash::{BuildHasher, Hash};
|
||||
/// use indexmap::map::{IndexMap, RawEntryApiV1};
|
||||
///
|
||||
/// let mut map = IndexMap::new();
|
||||
/// map.extend([("a", 100), ("b", 200), ("c", 300)]);
|
||||
///
|
||||
/// fn compute_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
|
||||
/// use core::hash::Hasher;
|
||||
/// let mut state = hash_builder.build_hasher();
|
||||
/// key.hash(&mut state);
|
||||
/// state.finish()
|
||||
/// }
|
||||
///
|
||||
/// for k in ["a", "b", "c", "d", "e", "f"] {
|
||||
/// let hash = compute_hash(map.hasher(), k);
|
||||
/// let i = map.get_index_of(k);
|
||||
/// let v = map.get(k);
|
||||
/// let kv = map.get_key_value(k);
|
||||
/// let ikv = map.get_full(k);
|
||||
///
|
||||
/// println!("Key: {} and value: {:?}", k, v);
|
||||
///
|
||||
/// assert_eq!(map.raw_entry_v1().from_key(k), kv);
|
||||
/// assert_eq!(map.raw_entry_v1().from_hash(hash, |q| *q == k), kv);
|
||||
/// assert_eq!(map.raw_entry_v1().from_key_hashed_nocheck(hash, k), kv);
|
||||
/// assert_eq!(map.raw_entry_v1().from_hash_full(hash, |q| *q == k), ikv);
|
||||
/// assert_eq!(map.raw_entry_v1().index_from_hash(hash, |q| *q == k), i);
|
||||
/// }
|
||||
/// ```
|
||||
fn raw_entry_v1(&self) -> RawEntryBuilder<'_, K, V, S>;
|
||||
|
||||
/// Creates a raw entry builder for the [`IndexMap`].
|
||||
///
|
||||
/// Raw entries provide the lowest level of control for searching and
|
||||
/// manipulating a map. They must be manually initialized with a hash and
|
||||
/// then manually searched. After this, insertions into a vacant entry
|
||||
/// still require an owned key to be provided.
|
||||
///
|
||||
/// Raw entries are useful for such exotic situations as:
|
||||
///
|
||||
/// * Hash memoization
|
||||
/// * Deferring the creation of an owned key until it is known to be required
|
||||
/// * Using a search key that doesn't work with the [`Equivalent`] trait
|
||||
/// * Using custom comparison logic without newtype wrappers
|
||||
///
|
||||
/// Because raw entries provide much more low-level control, it's much easier
|
||||
/// to put the `IndexMap` into an inconsistent state which, while memory-safe,
|
||||
/// will cause the map to produce seemingly random results. Higher-level and more
|
||||
/// foolproof APIs like [`entry`][IndexMap::entry] should be preferred when possible.
|
||||
///
|
||||
/// Raw entries give mutable access to the keys. This must not be used
|
||||
/// to modify how the key would compare or hash, as the map will not re-evaluate
|
||||
/// where the key should go, meaning the keys may become "lost" if their
|
||||
/// location does not reflect their state. For instance, if you change a key
|
||||
/// so that the map now contains keys which compare equal, search may start
|
||||
/// acting erratically, with two keys randomly masking each other. Implementations
|
||||
/// are free to assume this doesn't happen (within the limits of memory-safety).
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use core::hash::{BuildHasher, Hash};
|
||||
/// use indexmap::map::{IndexMap, RawEntryApiV1};
|
||||
/// use indexmap::map::raw_entry_v1::RawEntryMut;
|
||||
///
|
||||
/// let mut map = IndexMap::new();
|
||||
/// map.extend([("a", 100), ("b", 200), ("c", 300)]);
|
||||
///
|
||||
/// fn compute_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
|
||||
/// use core::hash::Hasher;
|
||||
/// let mut state = hash_builder.build_hasher();
|
||||
/// key.hash(&mut state);
|
||||
/// state.finish()
|
||||
/// }
|
||||
///
|
||||
/// // Existing key (insert and update)
|
||||
/// match map.raw_entry_mut_v1().from_key("a") {
|
||||
/// RawEntryMut::Vacant(_) => unreachable!(),
|
||||
/// RawEntryMut::Occupied(mut view) => {
|
||||
/// assert_eq!(view.index(), 0);
|
||||
/// assert_eq!(view.get(), &100);
|
||||
/// let v = view.get_mut();
|
||||
/// let new_v = (*v) * 10;
|
||||
/// *v = new_v;
|
||||
/// assert_eq!(view.insert(1111), 1000);
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// assert_eq!(map["a"], 1111);
|
||||
/// assert_eq!(map.len(), 3);
|
||||
///
|
||||
/// // Existing key (take)
|
||||
/// let hash = compute_hash(map.hasher(), "c");
|
||||
/// match map.raw_entry_mut_v1().from_key_hashed_nocheck(hash, "c") {
|
||||
/// RawEntryMut::Vacant(_) => unreachable!(),
|
||||
/// RawEntryMut::Occupied(view) => {
|
||||
/// assert_eq!(view.index(), 2);
|
||||
/// assert_eq!(view.shift_remove_entry(), ("c", 300));
|
||||
/// }
|
||||
/// }
|
||||
/// assert_eq!(map.raw_entry_v1().from_key("c"), None);
|
||||
/// assert_eq!(map.len(), 2);
|
||||
///
|
||||
/// // Nonexistent key (insert and update)
|
||||
/// let key = "d";
|
||||
/// let hash = compute_hash(map.hasher(), key);
|
||||
/// match map.raw_entry_mut_v1().from_hash(hash, |q| *q == key) {
|
||||
/// RawEntryMut::Occupied(_) => unreachable!(),
|
||||
/// RawEntryMut::Vacant(view) => {
|
||||
/// assert_eq!(view.index(), 2);
|
||||
/// let (k, value) = view.insert("d", 4000);
|
||||
/// assert_eq!((*k, *value), ("d", 4000));
|
||||
/// *value = 40000;
|
||||
/// }
|
||||
/// }
|
||||
/// assert_eq!(map["d"], 40000);
|
||||
/// assert_eq!(map.len(), 3);
|
||||
///
|
||||
/// match map.raw_entry_mut_v1().from_hash(hash, |q| *q == key) {
|
||||
/// RawEntryMut::Vacant(_) => unreachable!(),
|
||||
/// RawEntryMut::Occupied(view) => {
|
||||
/// assert_eq!(view.index(), 2);
|
||||
/// assert_eq!(view.swap_remove_entry(), ("d", 40000));
|
||||
/// }
|
||||
/// }
|
||||
/// assert_eq!(map.get("d"), None);
|
||||
/// assert_eq!(map.len(), 2);
|
||||
/// ```
|
||||
fn raw_entry_mut_v1(&mut self) -> RawEntryBuilderMut<'_, K, V, S>;
|
||||
}
|
||||
|
||||
impl<K, V, S> RawEntryApiV1<K, V, S> for IndexMap<K, V, S> {
|
||||
fn raw_entry_v1(&self) -> RawEntryBuilder<'_, K, V, S> {
|
||||
RawEntryBuilder { map: self }
|
||||
}
|
||||
|
||||
fn raw_entry_mut_v1(&mut self) -> RawEntryBuilderMut<'_, K, V, S> {
|
||||
RawEntryBuilderMut { map: self }
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for computing where in an [`IndexMap`] a key-value pair would be stored.
///
/// This `struct` is created by the [`IndexMap::raw_entry_v1`] method, provided by the
/// [`RawEntryApiV1`] trait. See its documentation for more.
pub struct RawEntryBuilder<'a, K, V, S> {
    // A shared borrow suffices: this builder only performs lookups.
    map: &'a IndexMap<K, V, S>,
}
|
||||
|
||||
impl<K, V, S> fmt::Debug for RawEntryBuilder<'_, K, V, S> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("RawEntryBuilder").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S> {
|
||||
/// Access an entry by key.
|
||||
pub fn from_key<Q: ?Sized>(self, key: &Q) -> Option<(&'a K, &'a V)>
|
||||
where
|
||||
S: BuildHasher,
|
||||
Q: Hash + Equivalent<K>,
|
||||
{
|
||||
self.map.get_key_value(key)
|
||||
}
|
||||
|
||||
/// Access an entry by a key and its hash.
|
||||
pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, key: &Q) -> Option<(&'a K, &'a V)>
|
||||
where
|
||||
Q: Equivalent<K>,
|
||||
{
|
||||
let hash = HashValue(hash as usize);
|
||||
let i = self.map.core.get_index_of(hash, key)?;
|
||||
self.map.get_index(i)
|
||||
}
|
||||
|
||||
/// Access an entry by hash.
|
||||
pub fn from_hash<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
|
||||
where
|
||||
F: FnMut(&K) -> bool,
|
||||
{
|
||||
let map = self.map;
|
||||
let i = self.index_from_hash(hash, is_match)?;
|
||||
map.get_index(i)
|
||||
}
|
||||
|
||||
/// Access an entry by hash, including its index.
|
||||
pub fn from_hash_full<F>(self, hash: u64, is_match: F) -> Option<(usize, &'a K, &'a V)>
|
||||
where
|
||||
F: FnMut(&K) -> bool,
|
||||
{
|
||||
let map = self.map;
|
||||
let i = self.index_from_hash(hash, is_match)?;
|
||||
let (key, value) = map.get_index(i)?;
|
||||
Some((i, key, value))
|
||||
}
|
||||
|
||||
/// Access the index of an entry by hash.
|
||||
pub fn index_from_hash<F>(self, hash: u64, mut is_match: F) -> Option<usize>
|
||||
where
|
||||
F: FnMut(&K) -> bool,
|
||||
{
|
||||
let hash = HashValue(hash as usize);
|
||||
let entries = &*self.map.core.entries;
|
||||
let eq = move |&i: &usize| is_match(&entries[i].key);
|
||||
self.map.core.indices.get(hash.get(), eq).copied()
|
||||
}
|
||||
}
|
||||
|
||||
/// A builder for computing where in an [`IndexMap`] a key-value pair would be stored.
///
/// This `struct` is created by the [`IndexMap::raw_entry_mut_v1`] method, provided by the
/// [`RawEntryApiV1`] trait. See its documentation for more.
pub struct RawEntryBuilderMut<'a, K, V, S> {
    // Exclusive borrow: the resulting entry may mutate or insert into the map.
    map: &'a mut IndexMap<K, V, S>,
}
|
||||
|
||||
impl<K, V, S> fmt::Debug for RawEntryBuilderMut<'_, K, V, S> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("RawEntryBuilderMut").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S> {
    /// Access an entry by key.
    pub fn from_key<Q: ?Sized>(self, key: &Q) -> RawEntryMut<'a, K, V, S>
    where
        S: BuildHasher,
        Q: Hash + Equivalent<K>,
    {
        // Hash with the map's own hasher, then defer to the no-check path.
        let hash = self.map.hash(key);
        self.from_key_hashed_nocheck(hash.get(), key)
    }

    /// Access an entry by a key and its hash.
    pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, key: &Q) -> RawEntryMut<'a, K, V, S>
    where
        Q: Equivalent<K>,
    {
        // Turn the equivalence relation into a plain key-match closure.
        self.from_hash(hash, |k| Q::equivalent(key, k))
    }

    /// Access an entry by hash.
    pub fn from_hash<F>(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S>
    where
        F: FnMut(&K) -> bool,
    {
        let hash = HashValue(hash as usize);
        match self.map.core.raw_entry(hash, is_match) {
            Ok(raw) => RawEntryMut::Occupied(RawOccupiedEntryMut {
                raw,
                hash_builder: PhantomData,
            }),
            // Miss: hand back the core plus the hasher so a later insert can
            // place the new key. (`core` and `hash_builder` are disjoint
            // fields, so both borrows through `self.map` coexist.)
            Err(map) => RawEntryMut::Vacant(RawVacantEntryMut {
                map,
                hash_builder: &self.map.hash_builder,
            }),
        }
    }
}
|
||||
|
||||
/// Raw entry for an existing key-value pair or a vacant location to
/// insert one.
///
/// Returned by the `from_*` methods of [`RawEntryBuilderMut`].
pub enum RawEntryMut<'a, K, V, S> {
    /// Existing slot with equivalent key.
    Occupied(RawOccupiedEntryMut<'a, K, V, S>),
    /// Vacant slot (no equivalent key in the map).
    Vacant(RawVacantEntryMut<'a, K, V, S>),
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug, S> fmt::Debug for RawEntryMut<'_, K, V, S> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let mut tuple = f.debug_tuple("RawEntryMut");
|
||||
match self {
|
||||
Self::Vacant(v) => tuple.field(v),
|
||||
Self::Occupied(o) => tuple.field(o),
|
||||
};
|
||||
tuple.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> RawEntryMut<'a, K, V, S> {
|
||||
/// Return the index where the key-value pair exists or may be inserted.
|
||||
#[inline]
|
||||
pub fn index(&self) -> usize {
|
||||
match self {
|
||||
Self::Occupied(entry) => entry.index(),
|
||||
Self::Vacant(entry) => entry.index(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts the given default key and value in the entry if it is vacant and returns mutable
|
||||
/// references to them. Otherwise mutable references to an already existent pair are returned.
|
||||
pub fn or_insert(self, default_key: K, default_value: V) -> (&'a mut K, &'a mut V)
|
||||
where
|
||||
K: Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
match self {
|
||||
Self::Occupied(entry) => entry.into_key_value_mut(),
|
||||
Self::Vacant(entry) => entry.insert(default_key, default_value),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts the result of the `call` function in the entry if it is vacant and returns mutable
|
||||
/// references to them. Otherwise mutable references to an already existent pair are returned.
|
||||
pub fn or_insert_with<F>(self, call: F) -> (&'a mut K, &'a mut V)
|
||||
where
|
||||
F: FnOnce() -> (K, V),
|
||||
K: Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
match self {
|
||||
Self::Occupied(entry) => entry.into_key_value_mut(),
|
||||
Self::Vacant(entry) => {
|
||||
let (key, value) = call();
|
||||
entry.insert(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Modifies the entry if it is occupied.
|
||||
pub fn and_modify<F>(mut self, f: F) -> Self
|
||||
where
|
||||
F: FnOnce(&mut K, &mut V),
|
||||
{
|
||||
if let Self::Occupied(entry) = &mut self {
|
||||
let (k, v) = entry.get_key_value_mut();
|
||||
f(k, v);
|
||||
}
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// A raw view into an occupied entry in an [`IndexMap`].
/// It is part of the [`RawEntryMut`] enum.
pub struct RawOccupiedEntryMut<'a, K, V, S> {
    // Live bucket view, tied to exclusive access of the map core.
    raw: RawTableEntry<'a, K, V>,
    // No hasher is needed once the entry is occupied; this only carries the
    // `S` type parameter so the enum variants agree.
    hash_builder: PhantomData<&'a S>,
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug, S> fmt::Debug for RawOccupiedEntryMut<'_, K, V, S> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("RawOccupiedEntryMut")
|
||||
.field("key", self.key())
|
||||
.field("value", self.get())
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> {
|
||||
/// Return the index of the key-value pair
|
||||
#[inline]
|
||||
pub fn index(&self) -> usize {
|
||||
self.raw.index()
|
||||
}
|
||||
|
||||
/// Gets a reference to the entry's key in the map.
|
||||
///
|
||||
/// Note that this is not the key that was used to find the entry. There may be an observable
|
||||
/// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
|
||||
/// extra fields or the memory address of an allocation.
|
||||
pub fn key(&self) -> &K {
|
||||
&self.raw.bucket().key
|
||||
}
|
||||
|
||||
/// Gets a mutable reference to the entry's key in the map.
|
||||
///
|
||||
/// Note that this is not the key that was used to find the entry. There may be an observable
|
||||
/// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
|
||||
/// extra fields or the memory address of an allocation.
|
||||
pub fn key_mut(&mut self) -> &mut K {
|
||||
&mut self.raw.bucket_mut().key
|
||||
}
|
||||
|
||||
/// Converts into a mutable reference to the entry's key in the map,
|
||||
/// with a lifetime bound to the map itself.
|
||||
///
|
||||
/// Note that this is not the key that was used to find the entry. There may be an observable
|
||||
/// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
|
||||
/// extra fields or the memory address of an allocation.
|
||||
pub fn into_key(self) -> &'a mut K {
|
||||
&mut self.raw.into_bucket().key
|
||||
}
|
||||
|
||||
/// Gets a reference to the entry's value in the map.
|
||||
pub fn get(&self) -> &V {
|
||||
&self.raw.bucket().value
|
||||
}
|
||||
|
||||
/// Gets a mutable reference to the entry's value in the map.
|
||||
///
|
||||
/// If you need a reference which may outlive the destruction of the
|
||||
/// [`RawEntryMut`] value, see [`into_mut`][Self::into_mut].
|
||||
pub fn get_mut(&mut self) -> &mut V {
|
||||
&mut self.raw.bucket_mut().value
|
||||
}
|
||||
|
||||
/// Converts into a mutable reference to the entry's value in the map,
|
||||
/// with a lifetime bound to the map itself.
|
||||
pub fn into_mut(self) -> &'a mut V {
|
||||
&mut self.raw.into_bucket().value
|
||||
}
|
||||
|
||||
/// Gets a reference to the entry's key and value in the map.
|
||||
pub fn get_key_value(&self) -> (&K, &V) {
|
||||
self.raw.bucket().refs()
|
||||
}
|
||||
|
||||
/// Gets a reference to the entry's key and value in the map.
|
||||
pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) {
|
||||
self.raw.bucket_mut().muts()
|
||||
}
|
||||
|
||||
/// Converts into a mutable reference to the entry's key and value in the map,
|
||||
/// with a lifetime bound to the map itself.
|
||||
pub fn into_key_value_mut(self) -> (&'a mut K, &'a mut V) {
|
||||
self.raw.into_bucket().muts()
|
||||
}
|
||||
|
||||
/// Sets the value of the entry, and returns the entry's old value.
|
||||
pub fn insert(&mut self, value: V) -> V {
|
||||
mem::replace(self.get_mut(), value)
|
||||
}
|
||||
|
||||
/// Sets the key of the entry, and returns the entry's old key.
|
||||
pub fn insert_key(&mut self, key: K) -> K {
|
||||
mem::replace(self.key_mut(), key)
|
||||
}
|
||||
|
||||
/// Remove the key, value pair stored in the map for this entry, and return the value.
|
||||
///
|
||||
/// **NOTE:** This is equivalent to [`.swap_remove()`][Self::swap_remove], replacing this
|
||||
/// entry's position with the last element, and it is deprecated in favor of calling that
|
||||
/// explicitly. If you need to preserve the relative order of the keys in the map, use
|
||||
/// [`.shift_remove()`][Self::shift_remove] instead.
|
||||
#[deprecated(note = "`remove` disrupts the map order -- \
|
||||
use `swap_remove` or `shift_remove` for explicit behavior.")]
|
||||
pub fn remove(self) -> V {
|
||||
self.swap_remove()
|
||||
}
|
||||
|
||||
/// Remove the key, value pair stored in the map for this entry, and return the value.
|
||||
///
|
||||
/// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with
|
||||
/// the last element of the map and popping it off.
|
||||
/// **This perturbs the position of what used to be the last element!**
|
||||
///
|
||||
/// Computes in **O(1)** time (average).
|
||||
pub fn swap_remove(self) -> V {
|
||||
self.swap_remove_entry().1
|
||||
}
|
||||
|
||||
/// Remove the key, value pair stored in the map for this entry, and return the value.
|
||||
///
|
||||
/// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the
|
||||
/// elements that follow it, preserving their relative order.
|
||||
/// **This perturbs the index of all of those elements!**
|
||||
///
|
||||
/// Computes in **O(n)** time (average).
|
||||
pub fn shift_remove(self) -> V {
|
||||
self.shift_remove_entry().1
|
||||
}
|
||||
|
||||
/// Remove and return the key, value pair stored in the map for this entry
|
||||
///
|
||||
/// **NOTE:** This is equivalent to [`.swap_remove_entry()`][Self::swap_remove_entry],
|
||||
/// replacing this entry's position with the last element, and it is deprecated in favor of
|
||||
/// calling that explicitly. If you need to preserve the relative order of the keys in the map,
|
||||
/// use [`.shift_remove_entry()`][Self::shift_remove_entry] instead.
|
||||
#[deprecated(note = "`remove_entry` disrupts the map order -- \
|
||||
use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")]
|
||||
pub fn remove_entry(self) -> (K, V) {
|
||||
self.swap_remove_entry()
|
||||
}
|
||||
|
||||
/// Remove and return the key, value pair stored in the map for this entry
|
||||
///
|
||||
/// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with
|
||||
/// the last element of the map and popping it off.
|
||||
/// **This perturbs the position of what used to be the last element!**
|
||||
///
|
||||
/// Computes in **O(1)** time (average).
|
||||
pub fn swap_remove_entry(self) -> (K, V) {
|
||||
let (map, index) = self.raw.remove_index();
|
||||
map.swap_remove_finish(index)
|
||||
}
|
||||
|
||||
/// Remove and return the key, value pair stored in the map for this entry
|
||||
///
|
||||
/// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the
|
||||
/// elements that follow it, preserving their relative order.
|
||||
/// **This perturbs the index of all of those elements!**
|
||||
///
|
||||
/// Computes in **O(n)** time (average).
|
||||
pub fn shift_remove_entry(self) -> (K, V) {
|
||||
let (map, index) = self.raw.remove_index();
|
||||
map.shift_remove_finish(index)
|
||||
}
|
||||
|
||||
/// Moves the position of the entry to a new index
/// by shifting all other entries in-between.
///
/// This is equivalent to [`IndexMap::move_index`]
/// coming `from` the current [`.index()`][Self::index].
///
/// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up.
/// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down.
///
/// ***Panics*** if `to` is out of bounds.
///
/// Computes in **O(n)** time (average).
pub fn move_index(self, to: usize) {
    let (core, from) = self.raw.into_inner();
    core.move_index(from, to);
}

/// Swaps the position of entry with another.
///
/// This is equivalent to [`IndexMap::swap_indices`]
/// with the current [`.index()`][Self::index] as one of the two being swapped.
///
/// ***Panics*** if the `other` index is out of bounds.
///
/// Computes in **O(1)** time (average).
pub fn swap_indices(self, other: usize) {
    let (core, current) = self.raw.into_inner();
    core.swap_indices(current, other)
}
}
|
||||
|
||||
/// A view into a vacant raw entry in an [`IndexMap`].
/// It is part of the [`RawEntryMut`] enum.
pub struct RawVacantEntryMut<'a, K, V, S> {
    // Mutable access to the map internals; insertions go through this.
    map: &'a mut IndexMapCore<K, V>,
    // Borrowed so `insert` can hash keys without owning the hasher.
    hash_builder: &'a S,
}

impl<K, V, S> fmt::Debug for RawVacantEntryMut<'_, K, V, S> {
    // Opaque debug output: the entry holds no key/value of its own to show.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RawVacantEntryMut").finish_non_exhaustive()
    }
}
|
||||
|
||||
impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> {
|
||||
/// Return the index where a key-value pair may be inserted.
|
||||
pub fn index(&self) -> usize {
|
||||
self.map.indices.len()
|
||||
}
|
||||
|
||||
/// Inserts the given key and value into the map,
|
||||
/// and returns mutable references to them.
|
||||
pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V)
|
||||
where
|
||||
K: Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
let mut h = self.hash_builder.build_hasher();
|
||||
key.hash(&mut h);
|
||||
self.insert_hashed_nocheck(h.finish(), key, value)
|
||||
}
|
||||
|
||||
/// Inserts the given key and value into the map with the provided hash,
|
||||
/// and returns mutable references to them.
|
||||
pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) {
|
||||
let hash = HashValue(hash as usize);
|
||||
let i = self.map.insert_unique(hash, key, value);
|
||||
self.map.entries[i].muts()
|
||||
}
|
||||
|
||||
/// Inserts the given key and value into the map at the given index,
|
||||
/// shifting others to the right, and returns mutable references to them.
|
||||
///
|
||||
/// ***Panics*** if `index` is out of bounds.
|
||||
///
|
||||
/// Computes in **O(n)** time (average).
|
||||
pub fn shift_insert(self, index: usize, key: K, value: V) -> (&'a mut K, &'a mut V)
|
||||
where
|
||||
K: Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
let mut h = self.hash_builder.build_hasher();
|
||||
key.hash(&mut h);
|
||||
self.shift_insert_hashed_nocheck(index, h.finish(), key, value)
|
||||
}
|
||||
|
||||
/// Inserts the given key and value into the map with the provided hash
|
||||
/// at the given index, and returns mutable references to them.
|
||||
///
|
||||
/// ***Panics*** if `index` is out of bounds.
|
||||
///
|
||||
/// Computes in **O(n)** time (average).
|
||||
pub fn shift_insert_hashed_nocheck(
|
||||
self,
|
||||
index: usize,
|
||||
hash: u64,
|
||||
key: K,
|
||||
value: V,
|
||||
) -> (&'a mut K, &'a mut V) {
|
||||
let hash = HashValue(hash as usize);
|
||||
self.map.shift_insert_unique(index, hash, key, value);
|
||||
self.map.entries[index].muts()
|
||||
}
|
||||
}
|
||||
|
||||
// Sealed-trait pattern: `Sealed` is only implemented here, so external crates
// cannot implement any public trait bounded by `private::Sealed`.
mod private {
    pub trait Sealed {}

    impl<K, V, S> Sealed for super::IndexMap<K, V, S> {}
}
|
||||
713
third-party/vendor/indexmap/src/map/iter.rs
vendored
Normal file
713
third-party/vendor/indexmap/src/map/iter.rs
vendored
Normal file
|
|
@ -0,0 +1,713 @@
|
|||
use super::core::IndexMapCore;
|
||||
use super::{Bucket, Entries, IndexMap, Slice};
|
||||
|
||||
use alloc::vec::{self, Vec};
|
||||
use core::fmt;
|
||||
use core::hash::{BuildHasher, Hash};
|
||||
use core::iter::FusedIterator;
|
||||
use core::ops::{Index, RangeBounds};
|
||||
use core::slice;
|
||||
|
||||
// Borrowed iteration: yields `(&K, &V)` pairs.
impl<'a, K, V, S> IntoIterator for &'a IndexMap<K, V, S> {
    type Item = (&'a K, &'a V);
    type IntoIter = Iter<'a, K, V>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

// Mutably-borrowed iteration: keys stay shared, values are mutable.
impl<'a, K, V, S> IntoIterator for &'a mut IndexMap<K, V, S> {
    type Item = (&'a K, &'a mut V);
    type IntoIter = IterMut<'a, K, V>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

// Owning iteration: consumes the map and yields owned `(K, V)` pairs.
impl<K, V, S> IntoIterator for IndexMap<K, V, S> {
    type Item = (K, V);
    type IntoIter = IntoIter<K, V>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self.into_entries())
    }
}
|
||||
|
||||
/// An iterator over the entries of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::iter`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Iter<'a, K, V> {
|
||||
iter: slice::Iter<'a, Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> Iter<'a, K, V> {
|
||||
pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
|
||||
Self {
|
||||
iter: entries.iter(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a slice of the remaining entries in the iterator.
|
||||
pub fn as_slice(&self) -> &'a Slice<K, V> {
|
||||
Slice::from_slice(self.iter.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V> Iterator for Iter<'a, K, V> {
|
||||
type Item = (&'a K, &'a V);
|
||||
|
||||
iterator_methods!(Bucket::refs);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
|
||||
double_ended_iterator_methods!(Bucket::refs);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for Iter<'_, K, V> {}
|
||||
|
||||
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
|
||||
impl<K, V> Clone for Iter<'_, K, V> {
|
||||
fn clone(&self) -> Self {
|
||||
Iter {
|
||||
iter: self.iter.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for Iter<'_, K, V> {
|
||||
fn default() -> Self {
|
||||
Self { iter: [].iter() }
|
||||
}
|
||||
}
|
||||
|
||||
/// A mutable iterator over the entries of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::iter_mut`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct IterMut<'a, K, V> {
|
||||
iter: slice::IterMut<'a, Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> IterMut<'a, K, V> {
|
||||
pub(super) fn new(entries: &'a mut [Bucket<K, V>]) -> Self {
|
||||
Self {
|
||||
iter: entries.iter_mut(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a slice of the remaining entries in the iterator.
|
||||
pub fn as_slice(&self) -> &Slice<K, V> {
|
||||
Slice::from_slice(self.iter.as_slice())
|
||||
}
|
||||
|
||||
/// Returns a mutable slice of the remaining entries in the iterator.
|
||||
///
|
||||
/// To avoid creating `&mut` references that alias, this is forced to consume the iterator.
|
||||
pub fn into_slice(self) -> &'a mut Slice<K, V> {
|
||||
Slice::from_mut_slice(self.iter.into_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
|
||||
type Item = (&'a K, &'a mut V);
|
||||
|
||||
iterator_methods!(Bucket::ref_mut);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for IterMut<'_, K, V> {
|
||||
double_ended_iterator_methods!(Bucket::ref_mut);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for IterMut<'_, K, V> {}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.iter.as_slice().iter().map(Bucket::refs);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for IterMut<'_, K, V> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
iter: [].iter_mut(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An owning iterator over the entries of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::into_iter`] method
|
||||
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
|
||||
pub struct IntoIter<K, V> {
|
||||
iter: vec::IntoIter<Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<K, V> IntoIter<K, V> {
|
||||
pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
|
||||
Self {
|
||||
iter: entries.into_iter(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a slice of the remaining entries in the iterator.
|
||||
pub fn as_slice(&self) -> &Slice<K, V> {
|
||||
Slice::from_slice(self.iter.as_slice())
|
||||
}
|
||||
|
||||
/// Returns a mutable slice of the remaining entries in the iterator.
|
||||
pub fn as_mut_slice(&mut self) -> &mut Slice<K, V> {
|
||||
Slice::from_mut_slice(self.iter.as_mut_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Iterator for IntoIter<K, V> {
|
||||
type Item = (K, V);
|
||||
|
||||
iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
|
||||
double_ended_iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for IntoIter<K, V> {}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoIter<K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.iter.as_slice().iter().map(Bucket::refs);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for IntoIter<K, V> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
iter: Vec::new().into_iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A draining iterator over the entries of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::drain`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Drain<'a, K, V> {
|
||||
iter: vec::Drain<'a, Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> Drain<'a, K, V> {
|
||||
pub(super) fn new(iter: vec::Drain<'a, Bucket<K, V>>) -> Self {
|
||||
Self { iter }
|
||||
}
|
||||
|
||||
/// Returns a slice of the remaining entries in the iterator.
|
||||
pub fn as_slice(&self) -> &Slice<K, V> {
|
||||
Slice::from_slice(self.iter.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Iterator for Drain<'_, K, V> {
|
||||
type Item = (K, V);
|
||||
|
||||
iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for Drain<'_, K, V> {
|
||||
double_ended_iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for Drain<'_, K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for Drain<'_, K, V> {}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Drain<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.iter.as_slice().iter().map(Bucket::refs);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// An iterator over the keys of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::keys`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Keys<'a, K, V> {
|
||||
iter: slice::Iter<'a, Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> Keys<'a, K, V> {
|
||||
pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
|
||||
Self {
|
||||
iter: entries.iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V> Iterator for Keys<'a, K, V> {
|
||||
type Item = &'a K;
|
||||
|
||||
iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for Keys<'_, K, V> {
|
||||
double_ended_iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for Keys<'_, K, V> {}
|
||||
|
||||
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
|
||||
impl<K, V> Clone for Keys<'_, K, V> {
|
||||
fn clone(&self) -> Self {
|
||||
Keys {
|
||||
iter: self.iter.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V> fmt::Debug for Keys<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for Keys<'_, K, V> {
|
||||
fn default() -> Self {
|
||||
Self { iter: [].iter() }
|
||||
}
|
||||
}
|
||||
|
||||
/// Access [`IndexMap`] keys at indexed positions.
|
||||
///
|
||||
/// While [`Index<usize> for IndexMap`][values] accesses a map's values,
|
||||
/// indexing through [`IndexMap::keys`] offers an alternative to access a map's
|
||||
/// keys instead.
|
||||
///
|
||||
/// [values]: IndexMap#impl-Index<usize>-for-IndexMap<K,+V,+S>
|
||||
///
|
||||
/// Since `Keys` is also an iterator, consuming items from the iterator will
|
||||
/// offset the effective indexes. Similarly, if `Keys` is obtained from
|
||||
/// [`Slice::keys`], indexes will be interpreted relative to the position of
|
||||
/// that slice.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use indexmap::IndexMap;
|
||||
///
|
||||
/// let mut map = IndexMap::new();
|
||||
/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
|
||||
/// map.insert(word.to_lowercase(), word.to_uppercase());
|
||||
/// }
|
||||
///
|
||||
/// assert_eq!(map[0], "LOREM");
|
||||
/// assert_eq!(map.keys()[0], "lorem");
|
||||
/// assert_eq!(map[1], "IPSUM");
|
||||
/// assert_eq!(map.keys()[1], "ipsum");
|
||||
///
|
||||
/// map.reverse();
|
||||
/// assert_eq!(map.keys()[0], "amet");
|
||||
/// assert_eq!(map.keys()[1], "sit");
|
||||
///
|
||||
/// map.sort_keys();
|
||||
/// assert_eq!(map.keys()[0], "amet");
|
||||
/// assert_eq!(map.keys()[1], "dolor");
|
||||
///
|
||||
/// // Advancing the iterator will offset the indexing
|
||||
/// let mut keys = map.keys();
|
||||
/// assert_eq!(keys[0], "amet");
|
||||
/// assert_eq!(keys.next().map(|s| &**s), Some("amet"));
|
||||
/// assert_eq!(keys[0], "dolor");
|
||||
/// assert_eq!(keys[1], "ipsum");
|
||||
///
|
||||
/// // Slices may have an offset as well
|
||||
/// let slice = &map[2..];
|
||||
/// assert_eq!(slice[0], "IPSUM");
|
||||
/// assert_eq!(slice.keys()[0], "ipsum");
|
||||
/// ```
|
||||
///
|
||||
/// ```should_panic
|
||||
/// use indexmap::IndexMap;
|
||||
///
|
||||
/// let mut map = IndexMap::new();
|
||||
/// map.insert("foo", 1);
|
||||
/// println!("{:?}", map.keys()[10]); // panics!
|
||||
/// ```
|
||||
impl<'a, K, V> Index<usize> for Keys<'a, K, V> {
|
||||
type Output = K;
|
||||
|
||||
/// Returns a reference to the key at the supplied `index`.
|
||||
///
|
||||
/// ***Panics*** if `index` is out of bounds.
|
||||
fn index(&self, index: usize) -> &K {
|
||||
&self.iter.as_slice()[index].key
|
||||
}
|
||||
}
|
||||
|
||||
/// An owning iterator over the keys of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::into_keys`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct IntoKeys<K, V> {
|
||||
iter: vec::IntoIter<Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<K, V> IntoKeys<K, V> {
|
||||
pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
|
||||
Self {
|
||||
iter: entries.into_iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Iterator for IntoKeys<K, V> {
|
||||
type Item = K;
|
||||
|
||||
iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for IntoKeys<K, V> {
|
||||
double_ended_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for IntoKeys<K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for IntoKeys<K, V> {}
|
||||
|
||||
impl<K: fmt::Debug, V> fmt::Debug for IntoKeys<K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for IntoKeys<K, V> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
iter: Vec::new().into_iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An iterator over the values of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::values`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Values<'a, K, V> {
|
||||
iter: slice::Iter<'a, Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> Values<'a, K, V> {
|
||||
pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
|
||||
Self {
|
||||
iter: entries.iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V> Iterator for Values<'a, K, V> {
|
||||
type Item = &'a V;
|
||||
|
||||
iterator_methods!(Bucket::value_ref);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for Values<'_, K, V> {
|
||||
double_ended_iterator_methods!(Bucket::value_ref);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for Values<'_, K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for Values<'_, K, V> {}
|
||||
|
||||
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
|
||||
impl<K, V> Clone for Values<'_, K, V> {
|
||||
fn clone(&self) -> Self {
|
||||
Values {
|
||||
iter: self.iter.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V: fmt::Debug> fmt::Debug for Values<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for Values<'_, K, V> {
|
||||
fn default() -> Self {
|
||||
Self { iter: [].iter() }
|
||||
}
|
||||
}
|
||||
|
||||
/// A mutable iterator over the values of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::values_mut`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct ValuesMut<'a, K, V> {
|
||||
iter: slice::IterMut<'a, Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> ValuesMut<'a, K, V> {
|
||||
pub(super) fn new(entries: &'a mut [Bucket<K, V>]) -> Self {
|
||||
Self {
|
||||
iter: entries.iter_mut(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
|
||||
type Item = &'a mut V;
|
||||
|
||||
iterator_methods!(Bucket::value_mut);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for ValuesMut<'_, K, V> {
|
||||
double_ended_iterator_methods!(Bucket::value_mut);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
|
||||
|
||||
impl<K, V: fmt::Debug> fmt::Debug for ValuesMut<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.iter.as_slice().iter().map(Bucket::value_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for ValuesMut<'_, K, V> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
iter: [].iter_mut(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An owning iterator over the values of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::into_values`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct IntoValues<K, V> {
|
||||
iter: vec::IntoIter<Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<K, V> IntoValues<K, V> {
|
||||
pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
|
||||
Self {
|
||||
iter: entries.into_iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Iterator for IntoValues<K, V> {
|
||||
type Item = V;
|
||||
|
||||
iterator_methods!(Bucket::value);
|
||||
}
|
||||
|
||||
impl<K, V> DoubleEndedIterator for IntoValues<K, V> {
|
||||
double_ended_iterator_methods!(Bucket::value);
|
||||
}
|
||||
|
||||
impl<K, V> ExactSizeIterator for IntoValues<K, V> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> FusedIterator for IntoValues<K, V> {}
|
||||
|
||||
impl<K, V: fmt::Debug> fmt::Debug for IntoValues<K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.iter.as_slice().iter().map(Bucket::value_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for IntoValues<K, V> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
iter: Vec::new().into_iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A splicing iterator for `IndexMap`.
|
||||
///
|
||||
/// This `struct` is created by [`IndexMap::splice()`].
|
||||
/// See its documentation for more.
|
||||
pub struct Splice<'a, I, K, V, S>
|
||||
where
|
||||
I: Iterator<Item = (K, V)>,
|
||||
K: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
map: &'a mut IndexMap<K, V, S>,
|
||||
tail: IndexMapCore<K, V>,
|
||||
drain: vec::IntoIter<Bucket<K, V>>,
|
||||
replace_with: I,
|
||||
}
|
||||
|
||||
impl<'a, I, K, V, S> Splice<'a, I, K, V, S>
|
||||
where
|
||||
I: Iterator<Item = (K, V)>,
|
||||
K: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
pub(super) fn new<R>(map: &'a mut IndexMap<K, V, S>, range: R, replace_with: I) -> Self
|
||||
where
|
||||
R: RangeBounds<usize>,
|
||||
{
|
||||
let (tail, drain) = map.core.split_splice(range);
|
||||
Self {
|
||||
map,
|
||||
tail,
|
||||
drain,
|
||||
replace_with,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, K, V, S> Drop for Splice<'_, I, K, V, S>
where
    I: Iterator<Item = (K, V)>,
    K: Hash + Eq,
    S: BuildHasher,
{
    // All real splice work happens on drop: drain leftovers, insert the
    // replacement items, then re-append the tail. The order here is deliberate.
    fn drop(&mut self) {
        // Finish draining unconsumed items. We don't strictly *have* to do this
        // manually, since we already split it into separate memory, but it will
        // match the drop order of `vec::Splice` items this way.
        let _ = self.drain.nth(usize::MAX);

        // Now insert all the new items. If a key matches an existing entry, it
        // keeps the original position and only replaces the value, like `insert`.
        while let Some((key, value)) = self.replace_with.next() {
            // Since the tail is disjoint, we can try to update it first,
            // or else insert (update or append) the primary map.
            let hash = self.map.hash(&key);
            if let Some(i) = self.tail.get_index_of(hash, &key) {
                self.tail.as_entries_mut()[i].value = value;
            } else {
                self.map.core.insert_full(hash, key, value);
            }
        }

        // Finally, re-append the tail
        self.map.core.append_unchecked(&mut self.tail);
    }
}
|
||||
|
||||
impl<I, K, V, S> Iterator for Splice<'_, I, K, V, S>
|
||||
where
|
||||
I: Iterator<Item = (K, V)>,
|
||||
K: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
type Item = (K, V);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.drain.next().map(Bucket::key_value)
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
self.drain.size_hint()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, K, V, S> DoubleEndedIterator for Splice<'_, I, K, V, S>
|
||||
where
|
||||
I: Iterator<Item = (K, V)>,
|
||||
K: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn next_back(&mut self) -> Option<Self::Item> {
|
||||
self.drain.next_back().map(Bucket::key_value)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, K, V, S> ExactSizeIterator for Splice<'_, I, K, V, S>
|
||||
where
|
||||
I: Iterator<Item = (K, V)>,
|
||||
K: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.drain.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, K, V, S> FusedIterator for Splice<'_, I, K, V, S>
|
||||
where
|
||||
I: Iterator<Item = (K, V)>,
|
||||
K: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
}
|
||||
|
||||
impl<'a, I, K, V, S> fmt::Debug for Splice<'a, I, K, V, S>
|
||||
where
|
||||
I: fmt::Debug + Iterator<Item = (K, V)>,
|
||||
K: fmt::Debug + Hash + Eq,
|
||||
V: fmt::Debug,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
// Follow `vec::Splice` in only printing the drain and replacement
|
||||
f.debug_struct("Splice")
|
||||
.field("drain", &self.drain)
|
||||
.field("replace_with", &self.replace_with)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
138
third-party/vendor/indexmap/src/map/serde_seq.rs
vendored
Normal file
138
third-party/vendor/indexmap/src/map/serde_seq.rs
vendored
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
//! Functions to serialize and deserialize an [`IndexMap`] as an ordered sequence.
|
||||
//!
|
||||
//! The default `serde` implementation serializes `IndexMap` as a normal map,
|
||||
//! but there is no guarantee that serialization formats will preserve the order
|
||||
//! of the key-value pairs. This module serializes `IndexMap` as a sequence of
|
||||
//! `(key, value)` elements instead, in order.
|
||||
//!
|
||||
//! This module may be used in a field attribute for derived implementations:
|
||||
//!
|
||||
//! ```
|
||||
//! # use indexmap::IndexMap;
|
||||
//! # use serde_derive::{Deserialize, Serialize};
|
||||
//! #[derive(Deserialize, Serialize)]
|
||||
//! struct Data {
|
||||
//! #[serde(with = "indexmap::map::serde_seq")]
|
||||
//! map: IndexMap<i32, u64>,
|
||||
//! // ...
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
|
||||
use serde::ser::{Serialize, Serializer};
|
||||
|
||||
use core::fmt::{self, Formatter};
|
||||
use core::hash::{BuildHasher, Hash};
|
||||
use core::marker::PhantomData;
|
||||
|
||||
use crate::map::Slice as MapSlice;
|
||||
use crate::serde::cautious_capacity;
|
||||
use crate::set::Slice as SetSlice;
|
||||
use crate::IndexMap;
|
||||
|
||||
/// Serializes a [`map::Slice`][MapSlice] as an ordered sequence.
///
/// This behaves like [`crate::map::serde_seq`] for `IndexMap`, serializing a sequence
/// of `(key, value)` pairs, rather than as a map that might not preserve order.
impl<K, V> Serialize for MapSlice<K, V>
where
    K: Serialize,
    V: Serialize,
{
    fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error>
    where
        T: Serializer,
    {
        // The slice iterates `(key, value)` pairs in order, so a plain
        // sequence serialization preserves that order.
        serializer.collect_seq(self)
    }
}
|
||||
|
||||
/// Serializes a [`set::Slice`][SetSlice] as an ordered sequence.
impl<T> Serialize for SetSlice<T>
where
    T: Serialize,
{
    // `Se` rather than the conventional `S` to avoid clashing with the
    // `T: Serialize` parameter naming used across this module.
    fn serialize<Se>(&self, serializer: Se) -> Result<Se::Ok, Se::Error>
    where
        Se: Serializer,
    {
        serializer.collect_seq(self)
    }
}
|
||||
|
||||
/// Serializes an [`IndexMap`] as an ordered sequence.
///
/// This function may be used in a field attribute for deriving [`Serialize`]:
///
/// ```
/// # use indexmap::IndexMap;
/// # use serde_derive::Serialize;
/// #[derive(Serialize)]
/// struct Data {
///     #[serde(serialize_with = "indexmap::map::serde_seq::serialize")]
///     map: IndexMap<i32, u64>,
///     // ...
/// }
/// ```
pub fn serialize<K, V, S, T>(map: &IndexMap<K, V, S>, serializer: T) -> Result<T::Ok, T::Error>
where
    K: Serialize,
    V: Serialize,
    T: Serializer,
{
    // `&IndexMap` iterates `(&K, &V)` pairs in order, so collecting it as a
    // sequence emits ordered `(key, value)` elements instead of a map.
    serializer.collect_seq(map)
}
|
||||
|
||||
/// Visitor to deserialize a *sequenced* `IndexMap`
|
||||
struct SeqVisitor<K, V, S>(PhantomData<(K, V, S)>);
|
||||
|
||||
impl<'de, K, V, S> Visitor<'de> for SeqVisitor<K, V, S>
|
||||
where
|
||||
K: Deserialize<'de> + Eq + Hash,
|
||||
V: Deserialize<'de>,
|
||||
S: Default + BuildHasher,
|
||||
{
|
||||
type Value = IndexMap<K, V, S>;
|
||||
|
||||
fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
|
||||
write!(formatter, "a sequenced map")
|
||||
}
|
||||
|
||||
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
|
||||
where
|
||||
A: SeqAccess<'de>,
|
||||
{
|
||||
let capacity = cautious_capacity::<K, V>(seq.size_hint());
|
||||
let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default());
|
||||
|
||||
while let Some((key, value)) = seq.next_element()? {
|
||||
map.insert(key, value);
|
||||
}
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
}
|
||||
|
||||
/// Deserializes an [`IndexMap`] from an ordered sequence.
///
/// This function may be used in a field attribute for deriving [`Deserialize`]:
///
/// ```
/// # use indexmap::IndexMap;
/// # use serde_derive::Deserialize;
/// #[derive(Deserialize)]
/// struct Data {
///     #[serde(deserialize_with = "indexmap::map::serde_seq::deserialize")]
///     map: IndexMap<i32, u64>,
///     // ...
/// }
/// ```
pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result<IndexMap<K, V, S>, D::Error>
where
    D: Deserializer<'de>,
    K: Deserialize<'de> + Eq + Hash,
    V: Deserialize<'de>,
    S: Default + BuildHasher,
{
    // Drive deserialization through `SeqVisitor`, which rebuilds the map
    // from `(key, value)` elements in sequence order.
    deserializer.deserialize_seq(SeqVisitor(PhantomData))
}
|
||||
540
third-party/vendor/indexmap/src/map/slice.rs
vendored
Normal file
540
third-party/vendor/indexmap/src/map/slice.rs
vendored
Normal file
|
|
@ -0,0 +1,540 @@
|
|||
use super::{
|
||||
Bucket, Entries, IndexMap, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values,
|
||||
ValuesMut,
|
||||
};
|
||||
use crate::util::try_simplify_range;
|
||||
|
||||
use alloc::boxed::Box;
|
||||
use alloc::vec::Vec;
|
||||
use core::cmp::Ordering;
|
||||
use core::fmt;
|
||||
use core::hash::{Hash, Hasher};
|
||||
use core::ops::{self, Bound, Index, IndexMut, RangeBounds};
|
||||
|
||||
/// A dynamically-sized slice of key-value pairs in an [`IndexMap`].
///
/// This supports indexed operations much like a `[(K, V)]` slice,
/// but not any hashed operations on the map keys.
///
/// Unlike `IndexMap`, `Slice` does consider the order for [`PartialEq`]
/// and [`Eq`], and it also implements [`PartialOrd`], [`Ord`], and [`Hash`].
#[repr(transparent)]
pub struct Slice<K, V> {
    // `repr(transparent)` guarantees the same layout as `[Bucket<K, V>]`,
    // which the unsafe reference casts below rely on.
    pub(crate) entries: [Bucket<K, V>],
}
|
||||
|
||||
// SAFETY: `Slice<K, V>` is a transparent wrapper around `[Bucket<K, V>]`,
// and reference lifetimes are bound together in function signatures.
#[allow(unsafe_code)]
impl<K, V> Slice<K, V> {
    /// Reinterprets a borrowed bucket slice as a borrowed `Slice`.
    pub(super) const fn from_slice(entries: &[Bucket<K, V>]) -> &Self {
        // SAFETY: `#[repr(transparent)]` makes the layouts identical, so the
        // pointer cast is sound; the returned borrow keeps the input lifetime.
        unsafe { &*(entries as *const [Bucket<K, V>] as *const Self) }
    }

    /// Reinterprets a uniquely borrowed bucket slice as a mutable `Slice`.
    pub(super) fn from_mut_slice(entries: &mut [Bucket<K, V>]) -> &mut Self {
        // SAFETY: same layout argument as `from_slice`, for unique references.
        unsafe { &mut *(entries as *mut [Bucket<K, V>] as *mut Self) }
    }

    /// Takes ownership of a boxed bucket slice as a boxed `Slice`.
    pub(super) fn from_boxed(entries: Box<[Bucket<K, V>]>) -> Box<Self> {
        // SAFETY: layout-compatible cast of the owned raw pointer; ownership
        // is transferred straight back into a `Box`, so nothing leaks.
        unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) }
    }

    /// Inverse of `from_boxed`: recovers the underlying boxed bucket slice.
    fn into_boxed(self: Box<Self>) -> Box<[Bucket<K, V>]> {
        // SAFETY: exact inverse of the cast performed in `from_boxed`.
        unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket<K, V>]) }
    }
}
|
||||
|
||||
// Public, safe API of the map slice. Most methods are thin wrappers that
// delegate to the underlying `[Bucket<K, V>]` slice and project out key/value
// references via `Bucket::refs` / `Bucket::ref_mut`.
impl<K, V> Slice<K, V> {
    /// Consumes the boxed slice and returns the owned entries as a `Vec`.
    pub(crate) fn into_entries(self: Box<Self>) -> Vec<Bucket<K, V>> {
        self.into_boxed().into_vec()
    }

    /// Returns an empty slice.
    pub const fn new<'a>() -> &'a Self {
        Self::from_slice(&[])
    }

    /// Returns an empty mutable slice.
    pub fn new_mut<'a>() -> &'a mut Self {
        Self::from_mut_slice(&mut [])
    }

    /// Return the number of key-value pairs in the map slice.
    #[inline]
    pub const fn len(&self) -> usize {
        self.entries.len()
    }

    /// Returns true if the map slice contains no elements.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }

    /// Get a key-value pair by index.
    ///
    /// Valid indices are *0 <= index < self.len()*
    pub fn get_index(&self, index: usize) -> Option<(&K, &V)> {
        self.entries.get(index).map(Bucket::refs)
    }

    /// Get a key-value pair by index, with mutable access to the value.
    ///
    /// Valid indices are *0 <= index < self.len()*
    pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> {
        self.entries.get_mut(index).map(Bucket::ref_mut)
    }

    /// Returns a slice of key-value pairs in the given range of indices.
    ///
    /// Valid indices are *0 <= index < self.len()*
    pub fn get_range<R: RangeBounds<usize>>(&self, range: R) -> Option<&Self> {
        // `try_simplify_range` normalizes any bound style to `Range<usize>`,
        // returning `None` for out-of-range bounds instead of panicking.
        let range = try_simplify_range(range, self.entries.len())?;
        self.entries.get(range).map(Slice::from_slice)
    }

    /// Returns a mutable slice of key-value pairs in the given range of indices.
    ///
    /// Valid indices are *0 <= index < self.len()*
    pub fn get_range_mut<R: RangeBounds<usize>>(&mut self, range: R) -> Option<&mut Self> {
        let range = try_simplify_range(range, self.entries.len())?;
        self.entries.get_mut(range).map(Slice::from_mut_slice)
    }

    /// Get the first key-value pair.
    pub fn first(&self) -> Option<(&K, &V)> {
        self.entries.first().map(Bucket::refs)
    }

    /// Get the first key-value pair, with mutable access to the value.
    pub fn first_mut(&mut self) -> Option<(&K, &mut V)> {
        self.entries.first_mut().map(Bucket::ref_mut)
    }

    /// Get the last key-value pair.
    pub fn last(&self) -> Option<(&K, &V)> {
        self.entries.last().map(Bucket::refs)
    }

    /// Get the last key-value pair, with mutable access to the value.
    pub fn last_mut(&mut self) -> Option<(&K, &mut V)> {
        self.entries.last_mut().map(Bucket::ref_mut)
    }

    /// Divides one slice into two at an index.
    ///
    /// ***Panics*** if `index > len`.
    pub fn split_at(&self, index: usize) -> (&Self, &Self) {
        let (first, second) = self.entries.split_at(index);
        (Self::from_slice(first), Self::from_slice(second))
    }

    /// Divides one mutable slice into two at an index.
    ///
    /// ***Panics*** if `index > len`.
    pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) {
        let (first, second) = self.entries.split_at_mut(index);
        (Self::from_mut_slice(first), Self::from_mut_slice(second))
    }

    /// Returns the first key-value pair and the rest of the slice,
    /// or `None` if it is empty.
    pub fn split_first(&self) -> Option<((&K, &V), &Self)> {
        if let [first, rest @ ..] = &self.entries {
            Some((first.refs(), Self::from_slice(rest)))
        } else {
            None
        }
    }

    /// Returns the first key-value pair and the rest of the slice,
    /// with mutable access to the value, or `None` if it is empty.
    pub fn split_first_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> {
        if let [first, rest @ ..] = &mut self.entries {
            Some((first.ref_mut(), Self::from_mut_slice(rest)))
        } else {
            None
        }
    }

    /// Returns the last key-value pair and the rest of the slice,
    /// or `None` if it is empty.
    pub fn split_last(&self) -> Option<((&K, &V), &Self)> {
        if let [rest @ .., last] = &self.entries {
            Some((last.refs(), Self::from_slice(rest)))
        } else {
            None
        }
    }

    /// Returns the last key-value pair and the rest of the slice,
    /// with mutable access to the value, or `None` if it is empty.
    pub fn split_last_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> {
        if let [rest @ .., last] = &mut self.entries {
            Some((last.ref_mut(), Self::from_mut_slice(rest)))
        } else {
            None
        }
    }

    /// Return an iterator over the key-value pairs of the map slice.
    pub fn iter(&self) -> Iter<'_, K, V> {
        Iter::new(&self.entries)
    }

    /// Return an iterator over the key-value pairs of the map slice.
    pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
        IterMut::new(&mut self.entries)
    }

    /// Return an iterator over the keys of the map slice.
    pub fn keys(&self) -> Keys<'_, K, V> {
        Keys::new(&self.entries)
    }

    /// Return an owning iterator over the keys of the map slice.
    pub fn into_keys(self: Box<Self>) -> IntoKeys<K, V> {
        IntoKeys::new(self.into_entries())
    }

    /// Return an iterator over the values of the map slice.
    pub fn values(&self) -> Values<'_, K, V> {
        Values::new(&self.entries)
    }

    /// Return an iterator over mutable references to the values of the map slice.
    pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
        ValuesMut::new(&mut self.entries)
    }

    /// Return an owning iterator over the values of the map slice.
    pub fn into_values(self: Box<Self>) -> IntoValues<K, V> {
        IntoValues::new(self.into_entries())
    }

    /// Search over a sorted map for a key.
    ///
    /// Returns the position where that key is present, or the position where it can be inserted to
    /// maintain the sort. See [`slice::binary_search`] for more details.
    ///
    /// Computes in **O(log(n))** time, which is notably less scalable than looking the key up in
    /// the map this is a slice from using [`IndexMap::get_index_of`], but this can also position
    /// missing keys.
    pub fn binary_search_keys(&self, x: &K) -> Result<usize, usize>
    where
        K: Ord,
    {
        self.binary_search_by(|p, _| p.cmp(x))
    }

    /// Search over a sorted map with a comparator function.
    ///
    /// Returns the position where that value is present, or the position where it can be inserted
    /// to maintain the sort. See [`slice::binary_search_by`] for more details.
    ///
    /// Computes in **O(log(n))** time.
    #[inline]
    pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
    where
        F: FnMut(&'a K, &'a V) -> Ordering,
    {
        self.entries.binary_search_by(move |a| f(&a.key, &a.value))
    }

    /// Search over a sorted map with an extraction function.
    ///
    /// Returns the position where that value is present, or the position where it can be inserted
    /// to maintain the sort. See [`slice::binary_search_by_key`] for more details.
    ///
    /// Computes in **O(log(n))** time.
    #[inline]
    pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
    where
        F: FnMut(&'a K, &'a V) -> B,
        B: Ord,
    {
        self.binary_search_by(|k, v| f(k, v).cmp(b))
    }

    /// Returns the index of the partition point of a sorted map according to the given predicate
    /// (the index of the first element of the second partition).
    ///
    /// See [`slice::partition_point`] for more details.
    ///
    /// Computes in **O(log(n))** time.
    #[must_use]
    pub fn partition_point<P>(&self, mut pred: P) -> usize
    where
        P: FnMut(&K, &V) -> bool,
    {
        self.entries
            .partition_point(move |a| pred(&a.key, &a.value))
    }
}
|
||||
|
||||
impl<'a, K, V> IntoIterator for &'a Slice<K, V> {
|
||||
type IntoIter = Iter<'a, K, V>;
|
||||
type Item = (&'a K, &'a V);
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
self.iter()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V> IntoIterator for &'a mut Slice<K, V> {
|
||||
type IntoIter = IterMut<'a, K, V>;
|
||||
type Item = (&'a K, &'a mut V);
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
self.iter_mut()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> IntoIterator for Box<Slice<K, V>> {
|
||||
type IntoIter = IntoIter<K, V>;
|
||||
type Item = (K, V);
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
IntoIter::new(self.into_entries())
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for &'_ Slice<K, V> {
|
||||
fn default() -> Self {
|
||||
Slice::from_slice(&[])
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for &'_ mut Slice<K, V> {
|
||||
fn default() -> Self {
|
||||
Slice::from_mut_slice(&mut [])
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Default for Box<Slice<K, V>> {
|
||||
fn default() -> Self {
|
||||
Slice::from_boxed(Box::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: Clone, V: Clone> Clone for Box<Slice<K, V>> {
|
||||
fn clone(&self) -> Self {
|
||||
Slice::from_boxed(self.entries.to_vec().into_boxed_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: Copy, V: Copy> From<&Slice<K, V>> for Box<Slice<K, V>> {
|
||||
fn from(slice: &Slice<K, V>) -> Self {
|
||||
Slice::from_boxed(Box::from(&slice.entries))
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Slice<K, V> {
    /// Formats as a list of `(key, value)` tuples, in slice order.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}
|
||||
|
||||
impl<K: PartialEq, V: PartialEq> PartialEq for Slice<K, V> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.len() == other.len() && self.iter().eq(other)
|
||||
}
|
||||
}
|
||||
|
||||
// Full equivalence holds whenever both keys and values are `Eq`.
impl<K: Eq, V: Eq> Eq for Slice<K, V> {}
|
||||
|
||||
impl<K: PartialOrd, V: PartialOrd> PartialOrd for Slice<K, V> {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
self.iter().partial_cmp(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: Ord, V: Ord> Ord for Slice<K, V> {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
self.iter().cmp(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: Hash, V: Hash> Hash for Slice<K, V> {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.len().hash(state);
|
||||
for (key, value) in self {
|
||||
key.hash(state);
|
||||
value.hash(state);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Index<usize> for Slice<K, V> {
    type Output = V;

    /// Returns the value at `index`; ***panics*** if out of bounds.
    fn index(&self, index: usize) -> &V {
        let bucket = &self.entries[index];
        &bucket.value
    }
}
|
||||
|
||||
impl<K, V> IndexMut<usize> for Slice<K, V> {
    /// Returns mutable access to the value at `index`; ***panics*** if out of bounds.
    fn index_mut(&mut self, index: usize) -> &mut V {
        let bucket = &mut self.entries[index];
        &mut bucket.value
    }
}
|
||||
|
||||
// We can't have `impl<I: RangeBounds<usize>> Index<I>` because that conflicts
// both upstream with `Index<usize>` and downstream with `Index<&Q>`.
// Instead, we repeat the implementations for all the core range types.
macro_rules! impl_index {
    ($($range:ty),*) => {$(
        // Range-indexing an `IndexMap` borrows a view into its entry storage.
        impl<K, V, S> Index<$range> for IndexMap<K, V, S> {
            type Output = Slice<K, V>;

            fn index(&self, range: $range) -> &Self::Output {
                Slice::from_slice(&self.as_entries()[range])
            }
        }

        impl<K, V, S> IndexMut<$range> for IndexMap<K, V, S> {
            fn index_mut(&mut self, range: $range) -> &mut Self::Output {
                Slice::from_mut_slice(&mut self.as_entries_mut()[range])
            }
        }

        // Sub-slicing a `Slice` yields another `Slice` over the sub-range.
        impl<K, V> Index<$range> for Slice<K, V> {
            type Output = Slice<K, V>;

            fn index(&self, range: $range) -> &Self {
                Self::from_slice(&self.entries[range])
            }
        }

        impl<K, V> IndexMut<$range> for Slice<K, V> {
            fn index_mut(&mut self, range: $range) -> &mut Self {
                Self::from_mut_slice(&mut self.entries[range])
            }
        }
    )*}
}
// Instantiate for every core range type plus the generic bound-pair form.
impl_index!(
    ops::Range<usize>,
    ops::RangeFrom<usize>,
    ops::RangeFull,
    ops::RangeInclusive<usize>,
    ops::RangeTo<usize>,
    ops::RangeToInclusive<usize>,
    (Bound<usize>, Bound<usize>)
);
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::vec::Vec;

    #[test]
    fn slice_index() {
        // Compares every supported range form against the equivalent `Vec`
        // slicing, and checks that `map[range]` and `slice[range]` agree.
        fn check(
            vec_slice: &[(i32, i32)],
            map_slice: &Slice<i32, i32>,
            sub_slice: &Slice<i32, i32>,
        ) {
            // Same range applied to the map and to its full slice must yield
            // the exact same memory (pointer equality).
            assert_eq!(map_slice as *const _, sub_slice as *const _);
            itertools::assert_equal(
                vec_slice.iter().copied(),
                map_slice.iter().map(|(&k, &v)| (k, v)),
            );
            itertools::assert_equal(vec_slice.iter().map(|(k, _)| k), map_slice.keys());
            itertools::assert_equal(vec_slice.iter().map(|(_, v)| v), map_slice.values());
        }

        let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect();
        let map: IndexMap<i32, i32> = vec.iter().cloned().collect();
        let slice = map.as_slice();

        // RangeFull
        check(&vec[..], &map[..], &slice[..]);

        for i in 0usize..10 {
            // Index
            assert_eq!(vec[i].1, map[i]);
            assert_eq!(vec[i].1, slice[i]);
            assert_eq!(map[&(i as i32)], map[i]);
            assert_eq!(map[&(i as i32)], slice[i]);

            // RangeFrom
            check(&vec[i..], &map[i..], &slice[i..]);

            // RangeTo
            check(&vec[..i], &map[..i], &slice[..i]);

            // RangeToInclusive
            check(&vec[..=i], &map[..=i], &slice[..=i]);

            // (Bound<usize>, Bound<usize>)
            let bounds = (Bound::Excluded(i), Bound::Unbounded);
            check(&vec[i + 1..], &map[bounds], &slice[bounds]);

            for j in i..=10 {
                // Range
                check(&vec[i..j], &map[i..j], &slice[i..j]);
            }

            for j in i..10 {
                // RangeInclusive
                check(&vec[i..=j], &map[i..=j], &slice[i..=j]);
            }
        }
    }

    #[test]
    fn slice_index_mut() {
        // Mutable counterpart of `slice_index`, exercising the `IndexMut`
        // implementations for every supported range form.
        fn check_mut(
            vec_slice: &[(i32, i32)],
            map_slice: &mut Slice<i32, i32>,
            sub_slice: &mut Slice<i32, i32>,
        ) {
            assert_eq!(map_slice, sub_slice);
            itertools::assert_equal(
                vec_slice.iter().copied(),
                map_slice.iter_mut().map(|(&k, &mut v)| (k, v)),
            );
            itertools::assert_equal(
                vec_slice.iter().map(|&(_, v)| v),
                map_slice.values_mut().map(|&mut v| v),
            );
        }

        let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect();
        let mut map: IndexMap<i32, i32> = vec.iter().cloned().collect();
        // A second map so `map[range]` and `slice[range]` don't alias mutably.
        let mut map2 = map.clone();
        let slice = map2.as_mut_slice();

        // RangeFull
        check_mut(&vec[..], &mut map[..], &mut slice[..]);

        for i in 0usize..10 {
            // IndexMut
            assert_eq!(&mut map[i], &mut slice[i]);

            // RangeFrom
            check_mut(&vec[i..], &mut map[i..], &mut slice[i..]);

            // RangeTo
            check_mut(&vec[..i], &mut map[..i], &mut slice[..i]);

            // RangeToInclusive
            check_mut(&vec[..=i], &mut map[..=i], &mut slice[..=i]);

            // (Bound<usize>, Bound<usize>)
            let bounds = (Bound::Excluded(i), Bound::Unbounded);
            check_mut(&vec[i + 1..], &mut map[bounds], &mut slice[bounds]);

            for j in i..=10 {
                // Range
                check_mut(&vec[i..j], &mut map[i..j], &mut slice[i..j]);
            }

            for j in i..10 {
                // RangeInclusive
                check_mut(&vec[i..=j], &mut map[i..=j], &mut slice[i..=j]);
            }
        }
    }
}
|
||||
727
third-party/vendor/indexmap/src/map/tests.rs
vendored
Normal file
727
third-party/vendor/indexmap/src/map/tests.rs
vendored
Normal file
|
|
@ -0,0 +1,727 @@
|
|||
use super::*;
|
||||
use std::string::String;
|
||||
|
||||
#[test]
fn it_works() {
    let mut map = IndexMap::new();
    assert_eq!(map.is_empty(), true);
    map.insert(1, ());
    // Re-inserting the same key must not grow the map.
    map.insert(1, ());
    assert_eq!(map.len(), 1);
    assert!(map.get(&1).is_some());
    assert_eq!(map.is_empty(), false);
}

#[test]
fn new() {
    // A freshly constructed map allocates nothing until the first insert.
    let map = IndexMap::<String, String>::new();
    println!("{:?}", map);
    assert_eq!(map.capacity(), 0);
    assert_eq!(map.len(), 0);
    assert_eq!(map.is_empty(), true);
}

#[test]
fn insert() {
    let insert = [0, 4, 2, 12, 8, 7, 11, 5];
    let not_present = [1, 3, 6, 9, 10];
    let mut map = IndexMap::with_capacity(insert.len());

    // Each insert of a new key grows the length by exactly one and the
    // key is immediately retrievable by both `get` and `Index`.
    for (i, &elt) in insert.iter().enumerate() {
        assert_eq!(map.len(), i);
        map.insert(elt, elt);
        assert_eq!(map.len(), i + 1);
        assert_eq!(map.get(&elt), Some(&elt));
        assert_eq!(map[&elt], elt);
    }
    println!("{:?}", map);

    // Keys that were never inserted must not be found.
    for &elt in &not_present {
        assert!(map.get(&elt).is_none());
    }
}
|
||||
|
||||
#[test]
fn insert_full() {
    let insert = vec![9, 2, 7, 1, 4, 6, 13];
    let present = vec![1, 6, 2];
    let mut map = IndexMap::with_capacity(insert.len());

    // New keys: `insert_full` reports `None` for the old value and an index
    // that agrees with `get_full`.
    for (i, &elt) in insert.iter().enumerate() {
        assert_eq!(map.len(), i);
        let (index, existing) = map.insert_full(elt, elt);
        assert_eq!(existing, None);
        assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0));
        assert_eq!(map.len(), i + 1);
    }

    let len = map.len();
    // Existing keys: the old value comes back and the length stays fixed.
    for &elt in &present {
        let (index, existing) = map.insert_full(elt, elt);
        assert_eq!(existing, Some(elt));
        assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0));
        assert_eq!(map.len(), len);
    }
}

#[test]
fn insert_2() {
    let mut map = IndexMap::with_capacity(16);

    let mut keys = vec![];
    keys.extend(0..16);
    // Use a smaller key set under Miri so the test stays fast.
    keys.extend(if cfg!(miri) { 32..64 } else { 128..267 });

    // After every insert (including those that trigger a resize), all
    // previously present keys must still be findable.
    for &i in &keys {
        let old_map = map.clone();
        map.insert(i, ());
        for key in old_map.keys() {
            if map.get(key).is_none() {
                println!("old_map: {:?}", old_map);
                println!("map: {:?}", map);
                panic!("did not find {} in map", key);
            }
        }
    }

    for &i in &keys {
        assert!(map.get(&i).is_some(), "did not find {}", i);
    }
}

#[test]
fn insert_order() {
    let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
    let mut map = IndexMap::new();

    for &elt in &insert {
        map.insert(elt, ());
    }

    assert_eq!(map.keys().count(), map.len());
    assert_eq!(map.keys().count(), insert.len());
    // Iteration order must match insertion order exactly.
    for (a, b) in insert.iter().zip(map.keys()) {
        assert_eq!(a, b);
    }
    // Positional access agrees with iteration order.
    for (i, k) in (0..insert.len()).zip(map.keys()) {
        assert_eq!(map.get_index(i).unwrap().0, k);
    }
}
|
||||
|
||||
#[test]
fn shift_insert() {
    let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
    let mut map = IndexMap::new();

    // Inserting every key at index 0 reverses the insertion order.
    for &elt in &insert {
        map.shift_insert(0, elt, ());
    }

    assert_eq!(map.keys().count(), map.len());
    assert_eq!(map.keys().count(), insert.len());
    for (a, b) in insert.iter().rev().zip(map.keys()) {
        assert_eq!(a, b);
    }
    for (i, k) in (0..insert.len()).zip(map.keys()) {
        assert_eq!(map.get_index(i).unwrap().0, k);
    }

    // "insert" that moves an existing entry
    map.shift_insert(0, insert[0], ());
    assert_eq!(map.keys().count(), insert.len());
    assert_eq!(insert[0], map.keys()[0]);
    for (a, b) in insert[1..].iter().rev().zip(map.keys().skip(1)) {
        assert_eq!(a, b);
    }
}

#[test]
fn grow() {
    let insert = [0, 4, 2, 12, 8, 7, 11];
    let not_present = [1, 3, 6, 9, 10];
    let mut map = IndexMap::with_capacity(insert.len());

    for (i, &elt) in insert.iter().enumerate() {
        assert_eq!(map.len(), i);
        map.insert(elt, elt);
        assert_eq!(map.len(), i + 1);
        assert_eq!(map.get(&elt), Some(&elt));
        assert_eq!(map[&elt], elt);
    }

    println!("{:?}", map);
    // Push well past the initial capacity to force several reallocations.
    for &elt in &insert {
        map.insert(elt * 10, elt);
    }
    for &elt in &insert {
        map.insert(elt * 100, elt);
    }
    for (i, &elt) in insert.iter().cycle().enumerate().take(100) {
        map.insert(elt * 100 + i as i32, elt);
    }
    println!("{:?}", map);
    // Growth must not make absent keys spuriously appear.
    for &elt in &not_present {
        assert!(map.get(&elt).is_none());
    }
}
|
||||
|
||||
#[test]
fn reserve() {
    let mut map = IndexMap::<usize, usize>::new();
    assert_eq!(map.capacity(), 0);
    map.reserve(100);
    let capacity = map.capacity();
    assert!(capacity >= 100);
    // Filling up to the reserved capacity must not reallocate.
    for i in 0..capacity {
        assert_eq!(map.len(), i);
        map.insert(i, i * i);
        assert_eq!(map.len(), i + 1);
        assert_eq!(map.capacity(), capacity);
        assert_eq!(map.get(&i), Some(&(i * i)));
    }
    // One insert past the reserved capacity must grow the map.
    // Uses the associated constant `usize::MAX` (the `std::usize` module
    // constants are deprecated), consistent with `try_reserve` below.
    map.insert(capacity, usize::MAX);
    assert_eq!(map.len(), capacity + 1);
    assert!(map.capacity() > capacity);
    assert_eq!(map.get(&capacity), Some(&usize::MAX));
}

#[test]
fn try_reserve() {
    let mut map = IndexMap::<usize, usize>::new();
    assert_eq!(map.capacity(), 0);
    // A reasonable reservation succeeds...
    assert_eq!(map.try_reserve(100), Ok(()));
    assert!(map.capacity() >= 100);
    // ...while an impossible one reports an error instead of aborting.
    assert!(map.try_reserve(usize::MAX).is_err());
}

#[test]
fn shrink_to_fit() {
    let mut map = IndexMap::<usize, usize>::new();
    assert_eq!(map.capacity(), 0);
    for i in 0..100 {
        assert_eq!(map.len(), i);
        map.insert(i, i * i);
        assert_eq!(map.len(), i + 1);
        assert!(map.capacity() >= i + 1);
        assert_eq!(map.get(&i), Some(&(i * i)));
        // Shrinking after every insert must preserve contents and leave
        // capacity exactly equal to the length.
        map.shrink_to_fit();
        assert_eq!(map.len(), i + 1);
        assert_eq!(map.capacity(), i + 1);
        assert_eq!(map.get(&i), Some(&(i * i)));
    }
}
|
||||
|
||||
#[test]
fn remove() {
    let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
    let mut map = IndexMap::new();

    for &elt in &insert {
        map.insert(elt, elt);
    }

    assert_eq!(map.keys().count(), map.len());
    assert_eq!(map.keys().count(), insert.len());
    for (a, b) in insert.iter().zip(map.keys()) {
        assert_eq!(a, b);
    }

    let remove_fail = [99, 77];
    let remove = [4, 12, 8, 7];

    // Removing absent keys must return `None` and leave the map untouched.
    for &key in &remove_fail {
        assert!(map.swap_remove_full(&key).is_none());
    }
    println!("{:?}", map);
    // `swap_remove_full` returns the index the key occupied plus the pair.
    for &key in &remove {
        //println!("{:?}", map);
        let index = map.get_full(&key).unwrap().0;
        assert_eq!(map.swap_remove_full(&key), Some((index, key, key)));
    }
    println!("{:?}", map);

    // Exactly the removed keys are gone; everything else remains.
    for key in &insert {
        assert_eq!(map.get(key).is_some(), !remove.contains(key));
    }
    assert_eq!(map.len(), insert.len() - remove.len());
    assert_eq!(map.keys().count(), insert.len() - remove.len());
}

#[test]
fn remove_to_empty() {
    // Removing every entry must leave a clean, empty map.
    let mut map = indexmap! { 0 => 0, 4 => 4, 5 => 5 };
    map.swap_remove(&5).unwrap();
    map.swap_remove(&4).unwrap();
    map.swap_remove(&0).unwrap();
    assert!(map.is_empty());
}

#[test]
fn swap_remove_index() {
    let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
    let mut map = IndexMap::new();

    for &elt in &insert {
        map.insert(elt, elt * 2);
    }

    let mut vector = insert.to_vec();
    let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1];

    // check that the same swap remove sequence on vec and map
    // have the same result.
    for &rm in remove_sequence {
        let out_vec = vector.swap_remove(rm);
        let (out_map, _) = map.swap_remove_index(rm).unwrap();
        assert_eq!(out_vec, out_map);
    }
    assert_eq!(vector.len(), map.len());
    for (a, b) in vector.iter().zip(map.keys()) {
        assert_eq!(a, b);
    }
}
|
||||
|
||||
#[test]
fn partial_eq_and_eq() {
    let mut map_a = IndexMap::new();
    map_a.insert(1, "1");
    map_a.insert(2, "2");
    let mut map_b = map_a.clone();
    assert_eq!(map_a, map_b);
    map_b.swap_remove(&1);
    assert_ne!(map_a, map_b);

    // Maps with different value types (&str vs String) must also compare
    // unequal when contents differ, in both argument orders.
    let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect();
    assert_ne!(map_a, map_c);
    assert_ne!(map_c, map_a);
}

#[test]
fn extend() {
    let mut map = IndexMap::new();
    // Extending from borrowed pairs and from owned pairs both work, and
    // preserve the order of extension.
    map.extend(vec![(&1, &2), (&3, &4)]);
    map.extend(vec![(5, 6)]);
    assert_eq!(
        map.into_iter().collect::<Vec<_>>(),
        vec![(1, 2), (3, 4), (5, 6)]
    );
}

#[test]
fn entry() {
    let mut map = IndexMap::new();

    map.insert(1, "1");
    map.insert(2, "2");
    {
        // A vacant entry reports the index where the key *would* go.
        let e = map.entry(3);
        assert_eq!(e.index(), 2);
        let e = e.or_insert("3");
        assert_eq!(e, &"3");
    }

    // An occupied entry reports its existing index and key, and
    // `or_insert` keeps the existing value.
    let e = map.entry(2);
    assert_eq!(e.index(), 1);
    assert_eq!(e.key(), &2);
    match e {
        Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"),
        Entry::Vacant(_) => panic!(),
    }
    assert_eq!(e.or_insert("4"), &"2");
}

#[test]
fn entry_and_modify() {
    let mut map = IndexMap::new();

    // `and_modify` applies the closure only to occupied entries...
    map.insert(1, "1");
    map.entry(1).and_modify(|x| *x = "2");
    assert_eq!(Some(&"2"), map.get(&1));

    // ...and is a no-op for vacant ones.
    map.entry(2).and_modify(|x| *x = "doesn't exist");
    assert_eq!(None, map.get(&2));
}
|
||||
|
||||
#[test]
fn entry_or_default() {
    let mut map = IndexMap::new();

    #[derive(Debug, PartialEq)]
    enum TestEnum {
        DefaultValue,
        NonDefaultValue,
    }

    impl Default for TestEnum {
        fn default() -> Self {
            TestEnum::DefaultValue
        }
    }

    // Occupied entry: `or_default` keeps the stored value...
    map.insert(1, TestEnum::NonDefaultValue);
    assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default());

    // ...vacant entry: it inserts and returns `Default::default()`.
    assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default());
}

#[test]
fn occupied_entry_key() {
    // These keys match hash and equality, but their addresses are distinct.
    let (k1, k2) = (&mut 1, &mut 1);
    let k1_ptr = k1 as *const i32;
    let k2_ptr = k2 as *const i32;
    assert_ne!(k1_ptr, k2_ptr);

    let mut map = IndexMap::new();
    map.insert(k1, "value");
    match map.entry(k2) {
        Entry::Occupied(ref e) => {
            // `OccupiedEntry::key` should reference the key in the map,
            // not the key that was used to find the entry.
            let ptr = *e.key() as *const i32;
            assert_eq!(ptr, k1_ptr);
            assert_ne!(ptr, k2_ptr);
        }
        Entry::Vacant(_) => panic!(),
    }
}

#[test]
fn get_index_entry() {
    let mut map = IndexMap::new();

    assert!(map.get_index_entry(0).is_none());

    map.insert(0, "0");
    map.insert(1, "1");
    map.insert(2, "2");
    map.insert(3, "3");

    // Index equal to the length is out of bounds.
    assert!(map.get_index_entry(4).is_none());

    {
        let e = map.get_index_entry(1).unwrap();
        assert_eq!(*e.key(), 1);
        assert_eq!(*e.get(), "1");
        assert_eq!(e.swap_remove(), "1");
    }

    {
        // After the swap-remove above, the last entry (key 3) now sits at index 1.
        let mut e = map.get_index_entry(1).unwrap();
        assert_eq!(*e.key(), 3);
        assert_eq!(*e.get(), "3");
        assert_eq!(e.insert("4"), "3");
    }

    assert_eq!(*map.get(&3).unwrap(), "4");
}
|
||||
|
||||
#[test]
|
||||
fn keys() {
|
||||
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
|
||||
let map: IndexMap<_, _> = vec.into_iter().collect();
|
||||
let keys: Vec<_> = map.keys().copied().collect();
|
||||
assert_eq!(keys.len(), 3);
|
||||
assert!(keys.contains(&1));
|
||||
assert!(keys.contains(&2));
|
||||
assert!(keys.contains(&3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn into_keys() {
|
||||
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
|
||||
let map: IndexMap<_, _> = vec.into_iter().collect();
|
||||
let keys: Vec<i32> = map.into_keys().collect();
|
||||
assert_eq!(keys.len(), 3);
|
||||
assert!(keys.contains(&1));
|
||||
assert!(keys.contains(&2));
|
||||
assert!(keys.contains(&3));
|
||||
}
|
||||
|
||||
#[test]
fn values() {
    // `values()` yields every value exactly once.
    let map: IndexMap<_, _> = vec![(1, 'a'), (2, 'b'), (3, 'c')].into_iter().collect();
    let values: Vec<_> = map.values().copied().collect();
    assert_eq!(values.len(), 3);
    for v in ['a', 'b', 'c'] {
        assert!(values.contains(&v));
    }
}
|
||||
|
||||
#[test]
fn values_mut() {
    let mut map: IndexMap<_, _> = vec![(1, 1), (2, 2), (3, 3)].into_iter().collect();
    // Double every value in place through `values_mut()`.
    map.values_mut().for_each(|value| *value *= 2);
    let values: Vec<_> = map.values().copied().collect();
    assert_eq!(values.len(), 3);
    for v in [2, 4, 6] {
        assert!(values.contains(&v));
    }
}
|
||||
|
||||
#[test]
fn into_values() {
    // `into_values()` consumes the map and yields owned values.
    let map: IndexMap<_, _> = vec![(1, 'a'), (2, 'b'), (3, 'c')].into_iter().collect();
    let values: Vec<char> = map.into_values().collect();
    assert_eq!(values.len(), 3);
    for v in ['a', 'b', 'c'] {
        assert!(values.contains(&v));
    }
}
|
||||
|
||||
#[test]
#[cfg(feature = "std")]
fn from_array() {
    // `From<[(K, V); N]>` must agree with building the map by insertion.
    let map = IndexMap::from([(1, 2), (3, 4)]);

    let mut expected = IndexMap::new();
    for (k, v) in [(1, 2), (3, 4)] {
        expected.insert(k, v);
    }

    assert_eq!(map, expected)
}
|
||||
|
||||
#[test]
fn iter_default() {
    // Zero-sized key/value types: only the iterator plumbing is exercised.
    struct K;
    struct V;
    // A default-constructed iterator must be immediately exhausted.
    fn assert_default<T>()
    where
        T: Default + Iterator,
    {
        assert!(T::default().next().is_none());
    }
    // Every iterator type exported by the map module implements `Default`.
    assert_default::<Iter<'static, K, V>>();
    assert_default::<IterMut<'static, K, V>>();
    assert_default::<IntoIter<K, V>>();
    assert_default::<Keys<'static, K, V>>();
    assert_default::<IntoKeys<K, V>>();
    assert_default::<Values<'static, K, V>>();
    assert_default::<ValuesMut<'static, K, V>>();
    assert_default::<IntoValues<K, V>>();
}
|
||||
|
||||
#[test]
fn test_binary_search_by() {
    // adapted from std's test for binary_search
    // Build a map whose values are `xs` in order, with keys offset by 100
    // so the search clearly operates on values, not keys.
    fn mk(xs: &[i32]) -> IndexMap<usize, i32> {
        xs.iter()
            .copied()
            .enumerate()
            .map(|(i, x)| (i + 100, x))
            .collect()
    }

    let b = mk(&[]);
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(0));

    let b = mk(&[4]);
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&3)), Err(0));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&4)), Ok(0));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(1));

    let b = mk(&[1, 2, 4, 6, 8, 9]);
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(3));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Ok(3));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Err(4));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Ok(4));

    let b = mk(&[1, 2, 4, 5, 6, 8]);
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&9)), Err(6));

    let b = mk(&[1, 2, 4, 6, 7, 8, 9]);
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Ok(3));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(3));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Ok(5));

    let b = mk(&[1, 2, 4, 5, 6, 8, 9]);
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Err(5));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&0)), Err(0));

    let b = mk(&[1, 3, 3, 3, 7]);
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&0)), Err(0));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&1)), Ok(0));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&2)), Err(1));
    // Duplicates: any of the matching indices is an acceptable answer.
    assert!(matches!(b.binary_search_by(|_, x| x.cmp(&3)), Ok(1..=3)));
    assert!(matches!(b.binary_search_by(|_, x| x.cmp(&3)), Ok(1..=3)));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&4)), Err(4));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(4));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Err(4));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Ok(4));
    assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Err(5));
}
|
||||
|
||||
#[test]
fn test_binary_search_by_key() {
    // adapted from std's test for binary_search
    // Build a map whose values are `xs` in order, with keys offset by 100
    // so the search clearly operates on values, not keys.
    fn mk(xs: &[i32]) -> IndexMap<usize, i32> {
        xs.iter()
            .copied()
            .enumerate()
            .map(|(i, x)| (i + 100, x))
            .collect()
    }

    let b = mk(&[]);
    assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(0));

    let b = mk(&[4]);
    assert_eq!(b.binary_search_by_key(&3, |_, &x| x), Err(0));
    assert_eq!(b.binary_search_by_key(&4, |_, &x| x), Ok(0));
    assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(1));

    let b = mk(&[1, 2, 4, 6, 8, 9]);
    assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(3));
    assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Ok(3));
    assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Err(4));
    assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Ok(4));

    let b = mk(&[1, 2, 4, 5, 6, 8]);
    assert_eq!(b.binary_search_by_key(&9, |_, &x| x), Err(6));

    let b = mk(&[1, 2, 4, 6, 7, 8, 9]);
    assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Ok(3));
    assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(3));
    assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Ok(5));

    let b = mk(&[1, 2, 4, 5, 6, 8, 9]);
    assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Err(5));
    assert_eq!(b.binary_search_by_key(&0, |_, &x| x), Err(0));

    let b = mk(&[1, 3, 3, 3, 7]);
    assert_eq!(b.binary_search_by_key(&0, |_, &x| x), Err(0));
    assert_eq!(b.binary_search_by_key(&1, |_, &x| x), Ok(0));
    assert_eq!(b.binary_search_by_key(&2, |_, &x| x), Err(1));
    // Duplicates: any of the matching indices is an acceptable answer.
    assert!(matches!(b.binary_search_by_key(&3, |_, &x| x), Ok(1..=3)));
    assert!(matches!(b.binary_search_by_key(&3, |_, &x| x), Ok(1..=3)));
    assert_eq!(b.binary_search_by_key(&4, |_, &x| x), Err(4));
    assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(4));
    assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Err(4));
    assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Ok(4));
    assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Err(5));
}
|
||||
|
||||
#[test]
fn test_partition_point() {
    // adapted from std's test for partition_point
    // Build a map whose values are `xs` in order, with keys offset by 100
    // so the predicate clearly operates on values, not keys.
    fn mk(xs: &[i32]) -> IndexMap<usize, i32> {
        xs.iter()
            .copied()
            .enumerate()
            .map(|(i, x)| (i + 100, x))
            .collect()
    }

    let b = mk(&[]);
    assert_eq!(b.partition_point(|_, &x| x < 5), 0);

    let b = mk(&[4]);
    assert_eq!(b.partition_point(|_, &x| x < 3), 0);
    assert_eq!(b.partition_point(|_, &x| x < 4), 0);
    assert_eq!(b.partition_point(|_, &x| x < 5), 1);

    let b = mk(&[1, 2, 4, 6, 8, 9]);
    assert_eq!(b.partition_point(|_, &x| x < 5), 3);
    assert_eq!(b.partition_point(|_, &x| x < 6), 3);
    assert_eq!(b.partition_point(|_, &x| x < 7), 4);
    assert_eq!(b.partition_point(|_, &x| x < 8), 4);

    let b = mk(&[1, 2, 4, 5, 6, 8]);
    assert_eq!(b.partition_point(|_, &x| x < 9), 6);

    let b = mk(&[1, 2, 4, 6, 7, 8, 9]);
    assert_eq!(b.partition_point(|_, &x| x < 6), 3);
    assert_eq!(b.partition_point(|_, &x| x < 5), 3);
    assert_eq!(b.partition_point(|_, &x| x < 8), 5);

    let b = mk(&[1, 2, 4, 5, 6, 8, 9]);
    assert_eq!(b.partition_point(|_, &x| x < 7), 5);
    assert_eq!(b.partition_point(|_, &x| x < 0), 0);

    let b = mk(&[1, 3, 3, 3, 7]);
    assert_eq!(b.partition_point(|_, &x| x < 0), 0);
    assert_eq!(b.partition_point(|_, &x| x < 1), 0);
    assert_eq!(b.partition_point(|_, &x| x < 2), 1);
    assert_eq!(b.partition_point(|_, &x| x < 3), 1);
    assert_eq!(b.partition_point(|_, &x| x < 4), 4);
    assert_eq!(b.partition_point(|_, &x| x < 5), 4);
    assert_eq!(b.partition_point(|_, &x| x < 6), 4);
    assert_eq!(b.partition_point(|_, &x| x < 7), 4);
    assert_eq!(b.partition_point(|_, &x| x < 8), 5);
}
|
||||
90
third-party/vendor/indexmap/src/mutable_keys.rs
vendored
Normal file
90
third-party/vendor/indexmap/src/mutable_keys.rs
vendored
Normal file
|
|
@ -0,0 +1,90 @@
|
|||
use core::hash::{BuildHasher, Hash};
|
||||
|
||||
use super::{Bucket, Entries, Equivalent, IndexMap};
|
||||
|
||||
/// Opt-in mutable access to [`IndexMap`] keys.
///
/// These methods expose `&mut K`, mutable references to the key as it is stored
/// in the map.
/// You are allowed to modify the keys in the hashmap **if the modification
/// does not change the key’s hash and equality**.
///
/// If keys are modified erroneously, you can no longer look them up.
/// This is sound (memory safe) but a logical error hazard (just like
/// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be).
///
/// `use` this trait to enable its methods for `IndexMap`.
///
/// This trait is sealed and cannot be implemented for types outside this crate.
pub trait MutableKeys: private::Sealed {
    type Key;
    type Value;

    /// Return item index, mutable reference to key and value
    ///
    /// Returns `None` if `key` is not present in the map.
    ///
    /// Computes in **O(1)** time (average).
    fn get_full_mut2<Q: ?Sized>(
        &mut self,
        key: &Q,
    ) -> Option<(usize, &mut Self::Key, &mut Self::Value)>
    where
        Q: Hash + Equivalent<Self::Key>;

    /// Return mutable reference to key and value at an index.
    ///
    /// Valid indices are *0 <= index < self.len()*
    ///
    /// Returns `None` if `index` is out of bounds.
    ///
    /// Computes in **O(1)** time.
    fn get_index_mut2(&mut self, index: usize) -> Option<(&mut Self::Key, &mut Self::Value)>;

    /// Scan through each key-value pair in the map and keep those where the
    /// closure `keep` returns `true`.
    ///
    /// The elements are visited in order, and remaining elements keep their
    /// order.
    ///
    /// Computes in **O(n)** time (average).
    fn retain2<F>(&mut self, keep: F)
    where
        F: FnMut(&mut Self::Key, &mut Self::Value) -> bool;
}
|
||||
|
||||
/// Opt-in mutable access to keys.
|
||||
///
|
||||
/// See [`MutableKeys`] for more information.
|
||||
impl<K, V, S> MutableKeys for IndexMap<K, V, S>
|
||||
where
|
||||
S: BuildHasher,
|
||||
{
|
||||
type Key = K;
|
||||
type Value = V;
|
||||
|
||||
fn get_full_mut2<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)>
|
||||
where
|
||||
Q: Hash + Equivalent<K>,
|
||||
{
|
||||
if let Some(i) = self.get_index_of(key) {
|
||||
let entry = &mut self.as_entries_mut()[i];
|
||||
Some((i, &mut entry.key, &mut entry.value))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn get_index_mut2(&mut self, index: usize) -> Option<(&mut K, &mut V)> {
|
||||
self.as_entries_mut().get_mut(index).map(Bucket::muts)
|
||||
}
|
||||
|
||||
fn retain2<F>(&mut self, keep: F)
|
||||
where
|
||||
F: FnMut(&mut K, &mut V) -> bool,
|
||||
{
|
||||
self.retain_mut(keep)
|
||||
}
|
||||
}
|
||||
|
||||
mod private {
    // Seals `MutableKeys`: only types in this crate can implement it,
    // because `Sealed` itself is not exported.
    pub trait Sealed {}

    impl<K, V, S> Sealed for super::IndexMap<K, V, S> {}
}
|
||||
663
third-party/vendor/indexmap/src/rayon/map.rs
vendored
Normal file
663
third-party/vendor/indexmap/src/rayon/map.rs
vendored
Normal file
|
|
@ -0,0 +1,663 @@
|
|||
//! Parallel iterator types for [`IndexMap`] with [`rayon`][::rayon].
|
||||
//!
|
||||
//! You will rarely need to interact with this module directly unless you need to name one of the
|
||||
//! iterator types.
|
||||
|
||||
use super::collect;
|
||||
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rayon::prelude::*;
|
||||
|
||||
use crate::vec::Vec;
|
||||
use alloc::boxed::Box;
|
||||
use core::cmp::Ordering;
|
||||
use core::fmt;
|
||||
use core::hash::{BuildHasher, Hash};
|
||||
use core::ops::RangeBounds;
|
||||
|
||||
use crate::map::Slice;
|
||||
use crate::Bucket;
|
||||
use crate::Entries;
|
||||
use crate::IndexMap;
|
||||
|
||||
impl<K, V, S> IntoParallelIterator for IndexMap<K, V, S>
where
    K: Send,
    V: Send,
{
    type Item = (K, V);
    type Iter = IntoParIter<K, V>;

    fn into_par_iter(self) -> Self::Iter {
        // Consume the map into its dense entry vector; the hash-table
        // indices are not needed for by-value iteration.
        IntoParIter {
            entries: self.into_entries(),
        }
    }
}
|
||||
|
||||
impl<K, V> IntoParallelIterator for Box<Slice<K, V>>
|
||||
where
|
||||
K: Send,
|
||||
V: Send,
|
||||
{
|
||||
type Item = (K, V);
|
||||
type Iter = IntoParIter<K, V>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
IntoParIter {
|
||||
entries: self.into_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel owning iterator over the entries of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::into_par_iter`] method
|
||||
/// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more.
|
||||
pub struct IntoParIter<K, V> {
|
||||
entries: Vec<Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoParIter<K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.entries.iter().map(Bucket::refs);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: Send, V: Send> ParallelIterator for IntoParIter<K, V> {
|
||||
type Item = (K, V);
|
||||
|
||||
parallel_iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<K: Send, V: Send> IndexedParallelIterator for IntoParIter<K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap<K, V, S>
|
||||
where
|
||||
K: Sync,
|
||||
V: Sync,
|
||||
{
|
||||
type Item = (&'a K, &'a V);
|
||||
type Iter = ParIter<'a, K, V>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIter {
|
||||
entries: self.as_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V> IntoParallelIterator for &'a Slice<K, V>
|
||||
where
|
||||
K: Sync,
|
||||
V: Sync,
|
||||
{
|
||||
type Item = (&'a K, &'a V);
|
||||
type Iter = ParIter<'a, K, V>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIter {
|
||||
entries: &self.entries,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel iterator over the entries of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::par_iter`] method
|
||||
/// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more.
|
||||
///
|
||||
/// [`IndexMap::par_iter`]: ../struct.IndexMap.html#method.par_iter
|
||||
pub struct ParIter<'a, K, V> {
|
||||
entries: &'a [Bucket<K, V>],
|
||||
}
|
||||
|
||||
impl<K, V> Clone for ParIter<'_, K, V> {
|
||||
fn clone(&self) -> Self {
|
||||
ParIter { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for ParIter<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.entries.iter().map(Bucket::refs);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
|
||||
type Item = (&'a K, &'a V);
|
||||
|
||||
parallel_iterator_methods!(Bucket::refs);
|
||||
}
|
||||
|
||||
impl<K: Sync, V: Sync> IndexedParallelIterator for ParIter<'_, K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::refs);
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap<K, V, S>
|
||||
where
|
||||
K: Sync + Send,
|
||||
V: Send,
|
||||
{
|
||||
type Item = (&'a K, &'a mut V);
|
||||
type Iter = ParIterMut<'a, K, V>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIterMut {
|
||||
entries: self.as_entries_mut(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K, V> IntoParallelIterator for &'a mut Slice<K, V>
|
||||
where
|
||||
K: Sync + Send,
|
||||
V: Send,
|
||||
{
|
||||
type Item = (&'a K, &'a mut V);
|
||||
type Iter = ParIterMut<'a, K, V>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIterMut {
|
||||
entries: &mut self.entries,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel mutable iterator over the entries of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::par_iter_mut`] method
|
||||
/// (provided by rayon's [`IntoParallelRefMutIterator`] trait). See its documentation for more.
|
||||
///
|
||||
/// [`IndexMap::par_iter_mut`]: ../struct.IndexMap.html#method.par_iter_mut
|
||||
pub struct ParIterMut<'a, K, V> {
|
||||
entries: &'a mut [Bucket<K, V>],
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for ParIterMut<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.entries.iter().map(Bucket::refs);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
|
||||
type Item = (&'a K, &'a mut V);
|
||||
|
||||
parallel_iterator_methods!(Bucket::ref_mut);
|
||||
}
|
||||
|
||||
impl<K: Sync + Send, V: Send> IndexedParallelIterator for ParIterMut<'_, K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::ref_mut);
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> ParallelDrainRange<usize> for &'a mut IndexMap<K, V, S>
|
||||
where
|
||||
K: Send,
|
||||
V: Send,
|
||||
{
|
||||
type Item = (K, V);
|
||||
type Iter = ParDrain<'a, K, V>;
|
||||
|
||||
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
|
||||
ParDrain {
|
||||
entries: self.core.par_drain(range),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel draining iterator over the entries of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::par_drain`] method
|
||||
/// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more.
|
||||
///
|
||||
/// [`IndexMap::par_drain`]: ../struct.IndexMap.html#method.par_drain
|
||||
pub struct ParDrain<'a, K: Send, V: Send> {
|
||||
entries: rayon::vec::Drain<'a, Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<K: Send, V: Send> ParallelIterator for ParDrain<'_, K, V> {
|
||||
type Item = (K, V);
|
||||
|
||||
parallel_iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<K: Send, V: Send> IndexedParallelIterator for ParDrain<'_, K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
/// Parallel iterator methods and other parallel methods.
|
||||
///
|
||||
/// The following methods **require crate feature `"rayon"`**.
|
||||
///
|
||||
/// See also the `IntoParallelIterator` implementations.
|
||||
impl<K, V, S> IndexMap<K, V, S>
|
||||
where
|
||||
K: Sync,
|
||||
V: Sync,
|
||||
{
|
||||
/// Return a parallel iterator over the keys of the map.
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the map is still preserved for operations like `reduce` and `collect`.
|
||||
pub fn par_keys(&self) -> ParKeys<'_, K, V> {
|
||||
ParKeys {
|
||||
entries: self.as_entries(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a parallel iterator over the values of the map.
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the map is still preserved for operations like `reduce` and `collect`.
|
||||
pub fn par_values(&self) -> ParValues<'_, K, V> {
|
||||
ParValues {
|
||||
entries: self.as_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parallel iterator methods and other parallel methods.
|
||||
///
|
||||
/// The following methods **require crate feature `"rayon"`**.
|
||||
///
|
||||
/// See also the `IntoParallelIterator` implementations.
|
||||
impl<K, V> Slice<K, V>
|
||||
where
|
||||
K: Sync,
|
||||
V: Sync,
|
||||
{
|
||||
/// Return a parallel iterator over the keys of the map slice.
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the slice is still preserved for operations like `reduce` and `collect`.
|
||||
pub fn par_keys(&self) -> ParKeys<'_, K, V> {
|
||||
ParKeys {
|
||||
entries: &self.entries,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a parallel iterator over the values of the map slice.
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the slice is still preserved for operations like `reduce` and `collect`.
|
||||
pub fn par_values(&self) -> ParValues<'_, K, V> {
|
||||
ParValues {
|
||||
entries: &self.entries,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V, S> IndexMap<K, V, S>
where
    K: Hash + Eq + Sync,
    V: Sync,
    S: BuildHasher,
{
    /// Returns `true` if `self` contains all of the same key-value pairs as `other`,
    /// regardless of each map's indexed order, determined in parallel.
    pub fn par_eq<V2, S2>(&self, other: &IndexMap<K, V2, S2>) -> bool
    where
        V: PartialEq<V2>,
        V2: Sync,
        S2: BuildHasher + Sync,
    {
        // Cheap length check first; only then compare every pair in parallel.
        // With equal lengths, a one-way subset check implies set equality.
        self.len() == other.len()
            && self
                .par_iter()
                .all(move |(key, value)| other.get(key).map_or(false, |v| *value == *v))
    }
}
|
||||
|
||||
/// A parallel iterator over the keys of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::par_keys`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct ParKeys<'a, K, V> {
|
||||
entries: &'a [Bucket<K, V>],
|
||||
}
|
||||
|
||||
impl<K, V> Clone for ParKeys<'_, K, V> {
|
||||
fn clone(&self) -> Self {
|
||||
ParKeys { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: fmt::Debug, V> fmt::Debug for ParKeys<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.entries.iter().map(Bucket::key_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> {
|
||||
type Item = &'a K;
|
||||
|
||||
parallel_iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
impl<K: Sync, V: Sync> IndexedParallelIterator for ParKeys<'_, K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
/// A parallel iterator over the values of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::par_values`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct ParValues<'a, K, V> {
|
||||
entries: &'a [Bucket<K, V>],
|
||||
}
|
||||
|
||||
impl<K, V> Clone for ParValues<'_, K, V> {
|
||||
fn clone(&self) -> Self {
|
||||
ParValues { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V: fmt::Debug> fmt::Debug for ParValues<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.entries.iter().map(Bucket::value_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> {
|
||||
type Item = &'a V;
|
||||
|
||||
parallel_iterator_methods!(Bucket::value_ref);
|
||||
}
|
||||
|
||||
impl<K: Sync, V: Sync> IndexedParallelIterator for ParValues<'_, K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::value_ref);
|
||||
}
|
||||
|
||||
impl<K, V, S> IndexMap<K, V, S>
|
||||
where
|
||||
K: Send,
|
||||
V: Send,
|
||||
{
|
||||
/// Return a parallel iterator over mutable references to the values of the map
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the map is still preserved for operations like `reduce` and `collect`.
|
||||
pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> {
|
||||
ParValuesMut {
|
||||
entries: self.as_entries_mut(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V> Slice<K, V>
where
    K: Send,
    V: Send,
{
    /// Return a parallel iterator over mutable references to the values of the map slice.
    ///
    /// While parallel iterators can process items in any order, their relative order
    /// in the slice is still preserved for operations like `reduce` and `collect`.
    pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> {
        ParValuesMut {
            entries: &mut self.entries,
        }
    }
}
|
||||
|
||||
impl<K, V, S> IndexMap<K, V, S>
|
||||
where
|
||||
K: Send,
|
||||
V: Send,
|
||||
{
|
||||
/// Sort the map’s key-value pairs in parallel, by the default ordering of the keys.
|
||||
pub fn par_sort_keys(&mut self)
|
||||
where
|
||||
K: Ord,
|
||||
{
|
||||
self.with_entries(|entries| {
|
||||
entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key));
|
||||
});
|
||||
}
|
||||
|
||||
/// Sort the map’s key-value pairs in place and in parallel, using the comparison
|
||||
/// function `cmp`.
|
||||
///
|
||||
/// The comparison function receives two key and value pairs to compare (you
|
||||
/// can sort by keys or values or their combination as needed).
|
||||
pub fn par_sort_by<F>(&mut self, cmp: F)
|
||||
where
|
||||
F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
|
||||
{
|
||||
self.with_entries(|entries| {
|
||||
entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
|
||||
});
|
||||
}
|
||||
|
||||
/// Sort the key-value pairs of the map in parallel and return a by-value parallel
|
||||
/// iterator of the key-value pairs with the result.
|
||||
pub fn par_sorted_by<F>(self, cmp: F) -> IntoParIter<K, V>
|
||||
where
|
||||
F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
|
||||
{
|
||||
let mut entries = self.into_entries();
|
||||
entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
|
||||
IntoParIter { entries }
|
||||
}
|
||||
|
||||
/// Sort the map's key-value pairs in parallel, by the default ordering of the keys.
|
||||
pub fn par_sort_unstable_keys(&mut self)
|
||||
where
|
||||
K: Ord,
|
||||
{
|
||||
self.with_entries(|entries| {
|
||||
entries.par_sort_unstable_by(|a, b| K::cmp(&a.key, &b.key));
|
||||
});
|
||||
}
|
||||
|
||||
/// Sort the map's key-value pairs in place and in parallel, using the comparison
|
||||
/// function `cmp`.
|
||||
///
|
||||
/// The comparison function receives two key and value pairs to compare (you
|
||||
/// can sort by keys or values or their combination as needed).
|
||||
pub fn par_sort_unstable_by<F>(&mut self, cmp: F)
|
||||
where
|
||||
F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
|
||||
{
|
||||
self.with_entries(|entries| {
|
||||
entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
|
||||
});
|
||||
}
|
||||
|
||||
/// Sort the key-value pairs of the map in parallel and return a by-value parallel
|
||||
/// iterator of the key-value pairs with the result.
|
||||
pub fn par_sorted_unstable_by<F>(self, cmp: F) -> IntoParIter<K, V>
|
||||
where
|
||||
F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
|
||||
{
|
||||
let mut entries = self.into_entries();
|
||||
entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
|
||||
IntoParIter { entries }
|
||||
}
|
||||
|
||||
/// Sort the map’s key-value pairs in place and in parallel, using a sort-key extraction
|
||||
/// function.
|
||||
pub fn par_sort_by_cached_key<T, F>(&mut self, sort_key: F)
|
||||
where
|
||||
T: Ord + Send,
|
||||
F: Fn(&K, &V) -> T + Sync,
|
||||
{
|
||||
self.with_entries(move |entries| {
|
||||
entries.par_sort_by_cached_key(move |a| sort_key(&a.key, &a.value));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel mutable iterator over the values of an [`IndexMap`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexMap::par_values_mut`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct ParValuesMut<'a, K, V> {
|
||||
entries: &'a mut [Bucket<K, V>],
|
||||
}
|
||||
|
||||
impl<K, V: fmt::Debug> fmt::Debug for ParValuesMut<'_, K, V> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.entries.iter().map(Bucket::value_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> {
|
||||
type Item = &'a mut V;
|
||||
|
||||
parallel_iterator_methods!(Bucket::value_mut);
|
||||
}
|
||||
|
||||
impl<K: Send, V: Send> IndexedParallelIterator for ParValuesMut<'_, K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::value_mut);
|
||||
}
|
||||
|
||||
impl<K, V, S> FromParallelIterator<(K, V)> for IndexMap<K, V, S>
|
||||
where
|
||||
K: Eq + Hash + Send,
|
||||
V: Send,
|
||||
S: BuildHasher + Default + Send,
|
||||
{
|
||||
fn from_par_iter<I>(iter: I) -> Self
|
||||
where
|
||||
I: IntoParallelIterator<Item = (K, V)>,
|
||||
{
|
||||
let list = collect(iter);
|
||||
let len = list.iter().map(Vec::len).sum();
|
||||
let mut map = Self::with_capacity_and_hasher(len, S::default());
|
||||
for vec in list {
|
||||
map.extend(vec);
|
||||
}
|
||||
map
|
||||
}
|
||||
}
|
||||
|
||||
impl<K, V, S> ParallelExtend<(K, V)> for IndexMap<K, V, S>
|
||||
where
|
||||
K: Eq + Hash + Send,
|
||||
V: Send,
|
||||
S: BuildHasher + Send,
|
||||
{
|
||||
fn par_extend<I>(&mut self, iter: I)
|
||||
where
|
||||
I: IntoParallelIterator<Item = (K, V)>,
|
||||
{
|
||||
for vec in collect(iter) {
|
||||
self.extend(vec);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap<K, V, S>
|
||||
where
|
||||
K: Copy + Eq + Hash + Send + Sync,
|
||||
V: Copy + Send + Sync,
|
||||
S: BuildHasher + Send,
|
||||
{
|
||||
fn par_extend<I>(&mut self, iter: I)
|
||||
where
|
||||
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
|
||||
{
|
||||
for vec in collect(iter) {
|
||||
self.extend(vec);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::string::String;
|
||||
|
||||
#[test]
fn insert_order() {
    let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
    let mut map = IndexMap::new();

    for &elt in &insert {
        map.insert(elt, ());
    }

    assert_eq!(map.par_keys().count(), map.len());
    assert_eq!(map.par_keys().count(), insert.len());
    // Parallel keys come back in insertion order.
    insert
        .par_iter()
        .zip(map.par_keys())
        .for_each(|(a, b)| assert_eq!(a, b));
    // And each position agrees with `get_index`.
    (0..insert.len())
        .into_par_iter()
        .zip(map.par_keys())
        .for_each(|(i, k)| assert_eq!(map.get_index(i).unwrap().0, k));
}
|
||||
|
||||
#[test]
|
||||
fn partial_eq_and_eq() {
|
||||
let mut map_a = IndexMap::new();
|
||||
map_a.insert(1, "1");
|
||||
map_a.insert(2, "2");
|
||||
let mut map_b = map_a.clone();
|
||||
assert!(map_a.par_eq(&map_b));
|
||||
map_b.swap_remove(&1);
|
||||
assert!(!map_a.par_eq(&map_b));
|
||||
map_b.insert(3, "3");
|
||||
assert!(!map_a.par_eq(&map_b));
|
||||
|
||||
let map_c: IndexMap<_, String> =
|
||||
map_b.into_par_iter().map(|(k, v)| (k, v.into())).collect();
|
||||
assert!(!map_a.par_eq(&map_c));
|
||||
assert!(!map_c.par_eq(&map_a));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extend() {
|
||||
let mut map = IndexMap::new();
|
||||
map.par_extend(vec![(&1, &2), (&3, &4)]);
|
||||
map.par_extend(vec![(5, 6)]);
|
||||
assert_eq!(
|
||||
map.into_par_iter().collect::<Vec<_>>(),
|
||||
vec![(1, 2), (3, 4), (5, 6)]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn keys() {
|
||||
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
|
||||
let map: IndexMap<_, _> = vec.into_par_iter().collect();
|
||||
let keys: Vec<_> = map.par_keys().copied().collect();
|
||||
assert_eq!(keys.len(), 3);
|
||||
assert!(keys.contains(&1));
|
||||
assert!(keys.contains(&2));
|
||||
assert!(keys.contains(&3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn values() {
|
||||
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
|
||||
let map: IndexMap<_, _> = vec.into_par_iter().collect();
|
||||
let values: Vec<_> = map.par_values().copied().collect();
|
||||
assert_eq!(values.len(), 3);
|
||||
assert!(values.contains(&'a'));
|
||||
assert!(values.contains(&'b'));
|
||||
assert!(values.contains(&'c'));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn values_mut() {
|
||||
let vec = vec![(1, 1), (2, 2), (3, 3)];
|
||||
let mut map: IndexMap<_, _> = vec.into_par_iter().collect();
|
||||
map.par_values_mut().for_each(|value| *value *= 2);
|
||||
let values: Vec<_> = map.par_values().copied().collect();
|
||||
assert_eq!(values.len(), 3);
|
||||
assert!(values.contains(&2));
|
||||
assert!(values.contains(&4));
|
||||
assert!(values.contains(&6));
|
||||
}
|
||||
}
|
||||
29
third-party/vendor/indexmap/src/rayon/mod.rs
vendored
Normal file
29
third-party/vendor/indexmap/src/rayon/mod.rs
vendored
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
#![cfg_attr(docsrs, doc(cfg(feature = "rayon")))]
|
||||
|
||||
use rayon::prelude::*;
|
||||
|
||||
use alloc::collections::LinkedList;
|
||||
|
||||
use crate::vec::Vec;
|
||||
|
||||
pub mod map;
|
||||
pub mod set;
|
||||
|
||||
// This form of intermediate collection is also how Rayon collects `HashMap`.
|
||||
// Note that the order will also be preserved!
|
||||
fn collect<I: IntoParallelIterator>(iter: I) -> LinkedList<Vec<I::Item>> {
|
||||
iter.into_par_iter()
|
||||
.fold(Vec::new, |mut vec, elem| {
|
||||
vec.push(elem);
|
||||
vec
|
||||
})
|
||||
.map(|vec| {
|
||||
let mut list = LinkedList::new();
|
||||
list.push_back(vec);
|
||||
list
|
||||
})
|
||||
.reduce(LinkedList::new, |mut list1, mut list2| {
|
||||
list1.append(&mut list2);
|
||||
list1
|
||||
})
|
||||
}
|
||||
756
third-party/vendor/indexmap/src/rayon/set.rs
vendored
Normal file
756
third-party/vendor/indexmap/src/rayon/set.rs
vendored
Normal file
|
|
@ -0,0 +1,756 @@
|
|||
//! Parallel iterator types for [`IndexSet`] with [rayon][::rayon].
|
||||
//!
|
||||
//! You will rarely need to interact with this module directly unless you need to name one of the
|
||||
//! iterator types.
|
||||
|
||||
use super::collect;
|
||||
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rayon::prelude::*;
|
||||
|
||||
use crate::vec::Vec;
|
||||
use alloc::boxed::Box;
|
||||
use core::cmp::Ordering;
|
||||
use core::fmt;
|
||||
use core::hash::{BuildHasher, Hash};
|
||||
use core::ops::RangeBounds;
|
||||
|
||||
use crate::set::Slice;
|
||||
use crate::Entries;
|
||||
use crate::IndexSet;
|
||||
|
||||
type Bucket<T> = crate::Bucket<T, ()>;
|
||||
|
||||
impl<T, S> IntoParallelIterator for IndexSet<T, S>
|
||||
where
|
||||
T: Send,
|
||||
{
|
||||
type Item = T;
|
||||
type Iter = IntoParIter<T>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
IntoParIter {
|
||||
entries: self.into_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> IntoParallelIterator for Box<Slice<T>>
|
||||
where
|
||||
T: Send,
|
||||
{
|
||||
type Item = T;
|
||||
type Iter = IntoParIter<T>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
IntoParIter {
|
||||
entries: self.into_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel owning iterator over the items of an [`IndexSet`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::into_par_iter`] method
|
||||
/// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more.
|
||||
pub struct IntoParIter<T> {
|
||||
entries: Vec<Bucket<T>>,
|
||||
}
|
||||
|
||||
impl<T: fmt::Debug> fmt::Debug for IntoParIter<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.entries.iter().map(Bucket::key_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send> ParallelIterator for IntoParIter<T> {
|
||||
type Item = T;
|
||||
|
||||
parallel_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<T: Send> IndexedParallelIterator for IntoParIter<T> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<'a, T, S> IntoParallelIterator for &'a IndexSet<T, S>
|
||||
where
|
||||
T: Sync,
|
||||
{
|
||||
type Item = &'a T;
|
||||
type Iter = ParIter<'a, T>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIter {
|
||||
entries: self.as_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> IntoParallelIterator for &'a Slice<T>
|
||||
where
|
||||
T: Sync,
|
||||
{
|
||||
type Item = &'a T;
|
||||
type Iter = ParIter<'a, T>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIter {
|
||||
entries: &self.entries,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel iterator over the items of an [`IndexSet`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::par_iter`] method
|
||||
/// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more.
|
||||
///
|
||||
/// [`IndexSet::par_iter`]: ../struct.IndexSet.html#method.par_iter
|
||||
pub struct ParIter<'a, T> {
|
||||
entries: &'a [Bucket<T>],
|
||||
}
|
||||
|
||||
impl<T> Clone for ParIter<'_, T> {
|
||||
fn clone(&self) -> Self {
|
||||
ParIter { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: fmt::Debug> fmt::Debug for ParIter<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.entries.iter().map(Bucket::key_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
|
||||
type Item = &'a T;
|
||||
|
||||
parallel_iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
impl<T: Sync> IndexedParallelIterator for ParIter<'_, T> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
impl<'a, T, S> ParallelDrainRange<usize> for &'a mut IndexSet<T, S>
|
||||
where
|
||||
T: Send,
|
||||
{
|
||||
type Item = T;
|
||||
type Iter = ParDrain<'a, T>;
|
||||
|
||||
fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
|
||||
ParDrain {
|
||||
entries: self.map.core.par_drain(range),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel draining iterator over the items of an [`IndexSet`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::par_drain`] method
|
||||
/// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more.
|
||||
///
|
||||
/// [`IndexSet::par_drain`]: ../struct.IndexSet.html#method.par_drain
|
||||
pub struct ParDrain<'a, T: Send> {
|
||||
entries: rayon::vec::Drain<'a, Bucket<T>>,
|
||||
}
|
||||
|
||||
impl<T: Send> ParallelIterator for ParDrain<'_, T> {
|
||||
type Item = T;
|
||||
|
||||
parallel_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<T: Send> IndexedParallelIterator for ParDrain<'_, T> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
/// Parallel iterator methods and other parallel methods.
|
||||
///
|
||||
/// The following methods **require crate feature `"rayon"`**.
|
||||
///
|
||||
/// See also the `IntoParallelIterator` implementations.
|
||||
impl<T, S> IndexSet<T, S>
|
||||
where
|
||||
T: Hash + Eq + Sync,
|
||||
S: BuildHasher + Sync,
|
||||
{
|
||||
/// Return a parallel iterator over the values that are in `self` but not `other`.
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the `self` set is still preserved for operations like `reduce` and `collect`.
|
||||
pub fn par_difference<'a, S2>(
|
||||
&'a self,
|
||||
other: &'a IndexSet<T, S2>,
|
||||
) -> ParDifference<'a, T, S, S2>
|
||||
where
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
ParDifference {
|
||||
set1: self,
|
||||
set2: other,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a parallel iterator over the values that are in `self` or `other`,
|
||||
/// but not in both.
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the sets is still preserved for operations like `reduce` and `collect`.
|
||||
/// Values from `self` are produced in their original order, followed by
|
||||
/// values from `other` in their original order.
|
||||
pub fn par_symmetric_difference<'a, S2>(
|
||||
&'a self,
|
||||
other: &'a IndexSet<T, S2>,
|
||||
) -> ParSymmetricDifference<'a, T, S, S2>
|
||||
where
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
ParSymmetricDifference {
|
||||
set1: self,
|
||||
set2: other,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a parallel iterator over the values that are in both `self` and `other`.
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the `self` set is still preserved for operations like `reduce` and `collect`.
|
||||
pub fn par_intersection<'a, S2>(
|
||||
&'a self,
|
||||
other: &'a IndexSet<T, S2>,
|
||||
) -> ParIntersection<'a, T, S, S2>
|
||||
where
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
ParIntersection {
|
||||
set1: self,
|
||||
set2: other,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a parallel iterator over all values that are in `self` or `other`.
|
||||
///
|
||||
/// While parallel iterators can process items in any order, their relative order
|
||||
/// in the sets is still preserved for operations like `reduce` and `collect`.
|
||||
/// Values from `self` are produced in their original order, followed by
|
||||
/// values that are unique to `other` in their original order.
|
||||
pub fn par_union<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> ParUnion<'a, T, S, S2>
|
||||
where
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
ParUnion {
|
||||
set1: self,
|
||||
set2: other,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if `self` contains all of the same values as `other`,
|
||||
/// regardless of each set's indexed order, determined in parallel.
|
||||
pub fn par_eq<S2>(&self, other: &IndexSet<T, S2>) -> bool
|
||||
where
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
self.len() == other.len() && self.par_is_subset(other)
|
||||
}
|
||||
|
||||
/// Returns `true` if `self` has no elements in common with `other`,
|
||||
/// determined in parallel.
|
||||
pub fn par_is_disjoint<S2>(&self, other: &IndexSet<T, S2>) -> bool
|
||||
where
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
if self.len() <= other.len() {
|
||||
self.par_iter().all(move |value| !other.contains(value))
|
||||
} else {
|
||||
other.par_iter().all(move |value| !self.contains(value))
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if all elements of `other` are contained in `self`,
|
||||
/// determined in parallel.
|
||||
pub fn par_is_superset<S2>(&self, other: &IndexSet<T, S2>) -> bool
|
||||
where
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
other.par_is_subset(self)
|
||||
}
|
||||
|
||||
/// Returns `true` if all elements of `self` are contained in `other`,
|
||||
/// determined in parallel.
|
||||
pub fn par_is_subset<S2>(&self, other: &IndexSet<T, S2>) -> bool
|
||||
where
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
self.len() <= other.len() && self.par_iter().all(move |value| other.contains(value))
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel iterator producing elements in the difference of [`IndexSet`]s.
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::par_difference`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct ParDifference<'a, T, S1, S2> {
|
||||
set1: &'a IndexSet<T, S1>,
|
||||
set2: &'a IndexSet<T, S2>,
|
||||
}
|
||||
|
||||
impl<T, S1, S2> Clone for ParDifference<'_, T, S1, S2> {
|
||||
fn clone(&self) -> Self {
|
||||
ParDifference { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S1, S2> fmt::Debug for ParDifference<'_, T, S1, S2>
|
||||
where
|
||||
T: fmt::Debug + Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list()
|
||||
.entries(self.set1.difference(self.set2))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, S1, S2> ParallelIterator for ParDifference<'a, T, S1, S2>
|
||||
where
|
||||
T: Hash + Eq + Sync,
|
||||
S1: BuildHasher + Sync,
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
type Item = &'a T;
|
||||
|
||||
fn drive_unindexed<C>(self, consumer: C) -> C::Result
|
||||
where
|
||||
C: UnindexedConsumer<Self::Item>,
|
||||
{
|
||||
let Self { set1, set2 } = self;
|
||||
|
||||
set1.par_iter()
|
||||
.filter(move |&item| !set2.contains(item))
|
||||
.drive_unindexed(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel iterator producing elements in the intersection of [`IndexSet`]s.
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::par_intersection`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct ParIntersection<'a, T, S1, S2> {
|
||||
set1: &'a IndexSet<T, S1>,
|
||||
set2: &'a IndexSet<T, S2>,
|
||||
}
|
||||
|
||||
impl<T, S1, S2> Clone for ParIntersection<'_, T, S1, S2> {
|
||||
fn clone(&self) -> Self {
|
||||
ParIntersection { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S1, S2> fmt::Debug for ParIntersection<'_, T, S1, S2>
|
||||
where
|
||||
T: fmt::Debug + Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list()
|
||||
.entries(self.set1.intersection(self.set2))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, S1, S2> ParallelIterator for ParIntersection<'a, T, S1, S2>
|
||||
where
|
||||
T: Hash + Eq + Sync,
|
||||
S1: BuildHasher + Sync,
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
type Item = &'a T;
|
||||
|
||||
fn drive_unindexed<C>(self, consumer: C) -> C::Result
|
||||
where
|
||||
C: UnindexedConsumer<Self::Item>,
|
||||
{
|
||||
let Self { set1, set2 } = self;
|
||||
|
||||
set1.par_iter()
|
||||
.filter(move |&item| set2.contains(item))
|
||||
.drive_unindexed(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel iterator producing elements in the symmetric difference of [`IndexSet`]s.
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::par_symmetric_difference`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct ParSymmetricDifference<'a, T, S1, S2> {
|
||||
set1: &'a IndexSet<T, S1>,
|
||||
set2: &'a IndexSet<T, S2>,
|
||||
}
|
||||
|
||||
impl<T, S1, S2> Clone for ParSymmetricDifference<'_, T, S1, S2> {
|
||||
fn clone(&self) -> Self {
|
||||
ParSymmetricDifference { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S1, S2> fmt::Debug for ParSymmetricDifference<'_, T, S1, S2>
|
||||
where
|
||||
T: fmt::Debug + Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list()
|
||||
.entries(self.set1.symmetric_difference(self.set2))
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, S1, S2> ParallelIterator for ParSymmetricDifference<'a, T, S1, S2>
|
||||
where
|
||||
T: Hash + Eq + Sync,
|
||||
S1: BuildHasher + Sync,
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
type Item = &'a T;
|
||||
|
||||
fn drive_unindexed<C>(self, consumer: C) -> C::Result
|
||||
where
|
||||
C: UnindexedConsumer<Self::Item>,
|
||||
{
|
||||
let Self { set1, set2 } = self;
|
||||
|
||||
set1.par_difference(set2)
|
||||
.chain(set2.par_difference(set1))
|
||||
.drive_unindexed(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
/// A parallel iterator producing elements in the union of [`IndexSet`]s.
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::par_union`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct ParUnion<'a, T, S1, S2> {
|
||||
set1: &'a IndexSet<T, S1>,
|
||||
set2: &'a IndexSet<T, S2>,
|
||||
}
|
||||
|
||||
impl<T, S1, S2> Clone for ParUnion<'_, T, S1, S2> {
|
||||
fn clone(&self) -> Self {
|
||||
ParUnion { ..*self }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S1, S2> fmt::Debug for ParUnion<'_, T, S1, S2>
|
||||
where
|
||||
T: fmt::Debug + Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.set1.union(self.set2)).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, S1, S2> ParallelIterator for ParUnion<'a, T, S1, S2>
|
||||
where
|
||||
T: Hash + Eq + Sync,
|
||||
S1: BuildHasher + Sync,
|
||||
S2: BuildHasher + Sync,
|
||||
{
|
||||
type Item = &'a T;
|
||||
|
||||
fn drive_unindexed<C>(self, consumer: C) -> C::Result
|
||||
where
|
||||
C: UnindexedConsumer<Self::Item>,
|
||||
{
|
||||
let Self { set1, set2 } = self;
|
||||
|
||||
set1.par_iter()
|
||||
.chain(set2.par_difference(set1))
|
||||
.drive_unindexed(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parallel sorting methods.
|
||||
///
|
||||
/// The following methods **require crate feature `"rayon"`**.
|
||||
impl<T, S> IndexSet<T, S>
|
||||
where
|
||||
T: Send,
|
||||
{
|
||||
/// Sort the set’s values in parallel by their default ordering.
|
||||
pub fn par_sort(&mut self)
|
||||
where
|
||||
T: Ord,
|
||||
{
|
||||
self.with_entries(|entries| {
|
||||
entries.par_sort_by(|a, b| T::cmp(&a.key, &b.key));
|
||||
});
|
||||
}
|
||||
|
||||
/// Sort the set’s values in place and in parallel, using the comparison function `cmp`.
|
||||
pub fn par_sort_by<F>(&mut self, cmp: F)
|
||||
where
|
||||
F: Fn(&T, &T) -> Ordering + Sync,
|
||||
{
|
||||
self.with_entries(|entries| {
|
||||
entries.par_sort_by(move |a, b| cmp(&a.key, &b.key));
|
||||
});
|
||||
}
|
||||
|
||||
/// Sort the values of the set in parallel and return a by-value parallel iterator of
|
||||
/// the values with the result.
|
||||
pub fn par_sorted_by<F>(self, cmp: F) -> IntoParIter<T>
|
||||
where
|
||||
F: Fn(&T, &T) -> Ordering + Sync,
|
||||
{
|
||||
let mut entries = self.into_entries();
|
||||
entries.par_sort_by(move |a, b| cmp(&a.key, &b.key));
|
||||
IntoParIter { entries }
|
||||
}
|
||||
|
||||
/// Sort the set's values in parallel by their default ordering.
|
||||
pub fn par_sort_unstable(&mut self)
|
||||
where
|
||||
T: Ord,
|
||||
{
|
||||
self.with_entries(|entries| {
|
||||
entries.par_sort_unstable_by(|a, b| T::cmp(&a.key, &b.key));
|
||||
});
|
||||
}
|
||||
|
||||
/// Sort the set’s values in place and in parallel, using the comparison function `cmp`.
|
||||
pub fn par_sort_unstable_by<F>(&mut self, cmp: F)
|
||||
where
|
||||
F: Fn(&T, &T) -> Ordering + Sync,
|
||||
{
|
||||
self.with_entries(|entries| {
|
||||
entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key));
|
||||
});
|
||||
}
|
||||
|
||||
/// Sort the values of the set in parallel and return a by-value parallel iterator of
|
||||
/// the values with the result.
|
||||
pub fn par_sorted_unstable_by<F>(self, cmp: F) -> IntoParIter<T>
|
||||
where
|
||||
F: Fn(&T, &T) -> Ordering + Sync,
|
||||
{
|
||||
let mut entries = self.into_entries();
|
||||
entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key));
|
||||
IntoParIter { entries }
|
||||
}
|
||||
|
||||
/// Sort the set’s values in place and in parallel, using a key extraction function.
|
||||
pub fn par_sort_by_cached_key<K, F>(&mut self, sort_key: F)
|
||||
where
|
||||
K: Ord + Send,
|
||||
F: Fn(&T) -> K + Sync,
|
||||
{
|
||||
self.with_entries(move |entries| {
|
||||
entries.par_sort_by_cached_key(move |a| sort_key(&a.key));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> FromParallelIterator<T> for IndexSet<T, S>
|
||||
where
|
||||
T: Eq + Hash + Send,
|
||||
S: BuildHasher + Default + Send,
|
||||
{
|
||||
fn from_par_iter<I>(iter: I) -> Self
|
||||
where
|
||||
I: IntoParallelIterator<Item = T>,
|
||||
{
|
||||
let list = collect(iter);
|
||||
let len = list.iter().map(Vec::len).sum();
|
||||
let mut set = Self::with_capacity_and_hasher(len, S::default());
|
||||
for vec in list {
|
||||
set.extend(vec);
|
||||
}
|
||||
set
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> ParallelExtend<T> for IndexSet<T, S>
|
||||
where
|
||||
T: Eq + Hash + Send,
|
||||
S: BuildHasher + Send,
|
||||
{
|
||||
fn par_extend<I>(&mut self, iter: I)
|
||||
where
|
||||
I: IntoParallelIterator<Item = T>,
|
||||
{
|
||||
for vec in collect(iter) {
|
||||
self.extend(vec);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: 'a, S> ParallelExtend<&'a T> for IndexSet<T, S>
|
||||
where
|
||||
T: Copy + Eq + Hash + Send + Sync,
|
||||
S: BuildHasher + Send,
|
||||
{
|
||||
fn par_extend<I>(&mut self, iter: I)
|
||||
where
|
||||
I: IntoParallelIterator<Item = &'a T>,
|
||||
{
|
||||
for vec in collect(iter) {
|
||||
self.extend(vec);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Parallel iteration must reflect insertion order and length.
    #[test]
    fn insert_order() {
        let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
        let mut set = IndexSet::new();

        for &elt in &insert {
            set.insert(elt);
        }

        assert_eq!(set.par_iter().count(), set.len());
        assert_eq!(set.par_iter().count(), insert.len());
        insert.par_iter().zip(&set).for_each(|(expected, actual)| {
            assert_eq!(expected, actual);
        });
        (0..insert.len())
            .into_par_iter()
            .zip(&set)
            .for_each(|(idx, value)| {
                assert_eq!(set.get_index(idx).unwrap(), value);
            });
    }

    /// `par_eq` compares contents regardless of order-sensitive `Eq`.
    #[test]
    fn partial_eq_and_eq() {
        let mut set_a = IndexSet::new();
        set_a.insert(1);
        set_a.insert(2);
        let mut set_b = set_a.clone();
        assert!(set_a.par_eq(&set_b));
        set_b.swap_remove(&1);
        assert!(!set_a.par_eq(&set_b));
        set_b.insert(3);
        assert!(!set_a.par_eq(&set_b));

        let set_c: IndexSet<_> = set_b.into_par_iter().collect();
        assert!(!set_a.par_eq(&set_c));
        assert!(!set_c.par_eq(&set_a));
    }

    /// Both `ParallelExtend` impls (borrowed and owned values) append in order.
    #[test]
    fn extend() {
        let mut set = IndexSet::new();
        set.par_extend(vec![&1, &2, &3, &4]);
        set.par_extend(vec![5, 6]);
        assert_eq!(
            set.into_par_iter().collect::<Vec<_>>(),
            vec![1, 2, 3, 4, 5, 6]
        );
    }

    /// Subset / superset / disjoint relations across overlapping sets.
    #[test]
    fn comparisons() {
        let set_a: IndexSet<_> = (0..3).collect();
        let set_b: IndexSet<_> = (3..6).collect();
        let set_c: IndexSet<_> = (0..6).collect();
        let set_d: IndexSet<_> = (3..9).collect();

        // A set relates to itself: subset and superset, never disjoint.
        assert!(!set_a.par_is_disjoint(&set_a));
        assert!(set_a.par_is_subset(&set_a));
        assert!(set_a.par_is_superset(&set_a));

        // a and b share nothing.
        assert!(set_a.par_is_disjoint(&set_b));
        assert!(set_b.par_is_disjoint(&set_a));
        assert!(!set_a.par_is_subset(&set_b));
        assert!(!set_b.par_is_subset(&set_a));
        assert!(!set_a.par_is_superset(&set_b));
        assert!(!set_b.par_is_superset(&set_a));

        // c contains all of a.
        assert!(!set_a.par_is_disjoint(&set_c));
        assert!(!set_c.par_is_disjoint(&set_a));
        assert!(set_a.par_is_subset(&set_c));
        assert!(!set_c.par_is_subset(&set_a));
        assert!(!set_a.par_is_superset(&set_c));
        assert!(set_c.par_is_superset(&set_a));

        // c and d overlap partially.
        assert!(!set_c.par_is_disjoint(&set_d));
        assert!(!set_d.par_is_disjoint(&set_c));
        assert!(!set_c.par_is_subset(&set_d));
        assert!(!set_d.par_is_subset(&set_c));
        assert!(!set_c.par_is_superset(&set_d));
        assert!(!set_d.par_is_superset(&set_c));
    }

    /// Set-operation iterators must preserve each source set's order.
    #[test]
    fn iter_comparisons() {
        use std::iter::empty;

        // Compare a parallel iterator's output against a sequential expectation.
        fn check<'a, I1, I2>(iter1: I1, iter2: I2)
        where
            I1: ParallelIterator<Item = &'a i32>,
            I2: Iterator<Item = i32>,
        {
            let v1: Vec<_> = iter1.copied().collect();
            let v2: Vec<_> = iter2.collect();
            assert_eq!(v1, v2);
        }

        let set_a: IndexSet<_> = (0..3).collect();
        let set_b: IndexSet<_> = (3..6).collect();
        let set_c: IndexSet<_> = (0..6).collect();
        let set_d: IndexSet<_> = (3..9).rev().collect();

        check(set_a.par_difference(&set_a), empty());
        check(set_a.par_symmetric_difference(&set_a), empty());
        check(set_a.par_intersection(&set_a), 0..3);
        check(set_a.par_union(&set_a), 0..3);

        check(set_a.par_difference(&set_b), 0..3);
        check(set_b.par_difference(&set_a), 3..6);
        check(set_a.par_symmetric_difference(&set_b), 0..6);
        check(set_b.par_symmetric_difference(&set_a), (3..6).chain(0..3));
        check(set_a.par_intersection(&set_b), empty());
        check(set_b.par_intersection(&set_a), empty());
        check(set_a.par_union(&set_b), 0..6);
        check(set_b.par_union(&set_a), (3..6).chain(0..3));

        check(set_a.par_difference(&set_c), empty());
        check(set_c.par_difference(&set_a), 3..6);
        check(set_a.par_symmetric_difference(&set_c), 3..6);
        check(set_c.par_symmetric_difference(&set_a), 3..6);
        check(set_a.par_intersection(&set_c), 0..3);
        check(set_c.par_intersection(&set_a), 0..3);
        check(set_a.par_union(&set_c), 0..6);
        check(set_c.par_union(&set_a), 0..6);

        check(set_c.par_difference(&set_d), 0..3);
        check(set_d.par_difference(&set_c), (6..9).rev());
        check(
            set_c.par_symmetric_difference(&set_d),
            (0..3).chain((6..9).rev()),
        );
        check(
            set_d.par_symmetric_difference(&set_c),
            (6..9).rev().chain(0..3),
        );
        check(set_c.par_intersection(&set_d), 3..6);
        check(set_d.par_intersection(&set_c), (3..6).rev());
        check(set_c.par_union(&set_d), (0..6).chain((6..9).rev()));
        check(set_d.par_union(&set_c), (3..9).rev().chain(0..3));
    }
}
|
||||
158
third-party/vendor/indexmap/src/rustc.rs
vendored
Normal file
158
third-party/vendor/indexmap/src/rustc.rs
vendored
Normal file
|
|
@ -0,0 +1,158 @@
|
|||
//! Minimal support for `rustc-rayon`, not intended for general use.
|
||||
|
||||
use crate::vec::Vec;
|
||||
use crate::{Bucket, Entries, IndexMap, IndexSet};
|
||||
|
||||
use rustc_rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rustc_rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
|
||||
|
||||
mod map {
|
||||
use super::*;
|
||||
|
||||
impl<K, V, S> IntoParallelIterator for IndexMap<K, V, S>
|
||||
where
|
||||
K: Send,
|
||||
V: Send,
|
||||
{
|
||||
type Item = (K, V);
|
||||
type Iter = IntoParIter<K, V>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
IntoParIter {
|
||||
entries: self.into_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct IntoParIter<K, V> {
|
||||
entries: Vec<Bucket<K, V>>,
|
||||
}
|
||||
|
||||
impl<K: Send, V: Send> ParallelIterator for IntoParIter<K, V> {
|
||||
type Item = (K, V);
|
||||
|
||||
parallel_iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<K: Send, V: Send> IndexedParallelIterator for IntoParIter<K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key_value);
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap<K, V, S>
|
||||
where
|
||||
K: Sync,
|
||||
V: Sync,
|
||||
{
|
||||
type Item = (&'a K, &'a V);
|
||||
type Iter = ParIter<'a, K, V>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIter {
|
||||
entries: self.as_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ParIter<'a, K, V> {
|
||||
entries: &'a [Bucket<K, V>],
|
||||
}
|
||||
|
||||
impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
|
||||
type Item = (&'a K, &'a V);
|
||||
|
||||
parallel_iterator_methods!(Bucket::refs);
|
||||
}
|
||||
|
||||
impl<K: Sync, V: Sync> IndexedParallelIterator for ParIter<'_, K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::refs);
|
||||
}
|
||||
|
||||
impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap<K, V, S>
|
||||
where
|
||||
K: Sync + Send,
|
||||
V: Send,
|
||||
{
|
||||
type Item = (&'a K, &'a mut V);
|
||||
type Iter = ParIterMut<'a, K, V>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIterMut {
|
||||
entries: self.as_entries_mut(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ParIterMut<'a, K, V> {
|
||||
entries: &'a mut [Bucket<K, V>],
|
||||
}
|
||||
|
||||
impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
|
||||
type Item = (&'a K, &'a mut V);
|
||||
|
||||
parallel_iterator_methods!(Bucket::ref_mut);
|
||||
}
|
||||
|
||||
impl<K: Sync + Send, V: Send> IndexedParallelIterator for ParIterMut<'_, K, V> {
|
||||
indexed_parallel_iterator_methods!(Bucket::ref_mut);
|
||||
}
|
||||
}
|
||||
|
||||
mod set {
|
||||
use super::*;
|
||||
|
||||
impl<T, S> IntoParallelIterator for IndexSet<T, S>
|
||||
where
|
||||
T: Send,
|
||||
{
|
||||
type Item = T;
|
||||
type Iter = IntoParIter<T>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
IntoParIter {
|
||||
entries: self.into_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct IntoParIter<T> {
|
||||
entries: Vec<Bucket<T, ()>>,
|
||||
}
|
||||
|
||||
impl<T: Send> ParallelIterator for IntoParIter<T> {
|
||||
type Item = T;
|
||||
|
||||
parallel_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<T: Send> IndexedParallelIterator for IntoParIter<T> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<'a, T, S> IntoParallelIterator for &'a IndexSet<T, S>
|
||||
where
|
||||
T: Sync,
|
||||
{
|
||||
type Item = &'a T;
|
||||
type Iter = ParIter<'a, T>;
|
||||
|
||||
fn into_par_iter(self) -> Self::Iter {
|
||||
ParIter {
|
||||
entries: self.as_entries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ParIter<'a, T> {
|
||||
entries: &'a [Bucket<T, ()>],
|
||||
}
|
||||
|
||||
impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
|
||||
type Item = &'a T;
|
||||
|
||||
parallel_iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
impl<T: Sync> IndexedParallelIterator for ParIter<'_, T> {
|
||||
indexed_parallel_iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
}
|
||||
166
third-party/vendor/indexmap/src/serde.rs
vendored
Normal file
166
third-party/vendor/indexmap/src/serde.rs
vendored
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
#![cfg_attr(docsrs, doc(cfg(feature = "serde")))]
|
||||
|
||||
use serde::de::value::{MapDeserializer, SeqDeserializer};
|
||||
use serde::de::{
|
||||
Deserialize, Deserializer, Error, IntoDeserializer, MapAccess, SeqAccess, Visitor,
|
||||
};
|
||||
use serde::ser::{Serialize, Serializer};
|
||||
|
||||
use core::fmt::{self, Formatter};
|
||||
use core::hash::{BuildHasher, Hash};
|
||||
use core::marker::PhantomData;
|
||||
use core::{cmp, mem};
|
||||
|
||||
use crate::{Bucket, IndexMap, IndexSet};
|
||||
|
||||
/// Limit our preallocated capacity from a deserializer `size_hint()`.
|
||||
///
|
||||
/// We do account for the `Bucket` overhead from its saved `hash` field, but we don't count the
|
||||
/// `RawTable` allocation or the fact that its raw capacity will be rounded up to a power of two.
|
||||
/// The "max" is an arbitrary choice anyway, not something that needs precise adherence.
|
||||
///
|
||||
/// This is based on the internal `serde::de::size_hint::cautious(hint)` function.
|
||||
pub(crate) fn cautious_capacity<K, V>(hint: Option<usize>) -> usize {
|
||||
const MAX_PREALLOC_BYTES: usize = 1024 * 1024;
|
||||
|
||||
cmp::min(
|
||||
hint.unwrap_or(0),
|
||||
MAX_PREALLOC_BYTES / mem::size_of::<Bucket<K, V>>(),
|
||||
)
|
||||
}
|
||||
|
||||
impl<K, V, S> Serialize for IndexMap<K, V, S>
|
||||
where
|
||||
K: Serialize,
|
||||
V: Serialize,
|
||||
{
|
||||
fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error>
|
||||
where
|
||||
T: Serializer,
|
||||
{
|
||||
serializer.collect_map(self)
|
||||
}
|
||||
}
|
||||
|
||||
struct IndexMapVisitor<K, V, S>(PhantomData<(K, V, S)>);
|
||||
|
||||
impl<'de, K, V, S> Visitor<'de> for IndexMapVisitor<K, V, S>
|
||||
where
|
||||
K: Deserialize<'de> + Eq + Hash,
|
||||
V: Deserialize<'de>,
|
||||
S: Default + BuildHasher,
|
||||
{
|
||||
type Value = IndexMap<K, V, S>;
|
||||
|
||||
fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
|
||||
write!(formatter, "a map")
|
||||
}
|
||||
|
||||
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
|
||||
where
|
||||
A: MapAccess<'de>,
|
||||
{
|
||||
let capacity = cautious_capacity::<K, V>(map.size_hint());
|
||||
let mut values = IndexMap::with_capacity_and_hasher(capacity, S::default());
|
||||
|
||||
while let Some((key, value)) = map.next_entry()? {
|
||||
values.insert(key, value);
|
||||
}
|
||||
|
||||
Ok(values)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, K, V, S> Deserialize<'de> for IndexMap<K, V, S>
|
||||
where
|
||||
K: Deserialize<'de> + Eq + Hash,
|
||||
V: Deserialize<'de>,
|
||||
S: Default + BuildHasher,
|
||||
{
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
deserializer.deserialize_map(IndexMapVisitor(PhantomData))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, K, V, S, E> IntoDeserializer<'de, E> for IndexMap<K, V, S>
|
||||
where
|
||||
K: IntoDeserializer<'de, E> + Eq + Hash,
|
||||
V: IntoDeserializer<'de, E>,
|
||||
S: BuildHasher,
|
||||
E: Error,
|
||||
{
|
||||
type Deserializer = MapDeserializer<'de, <Self as IntoIterator>::IntoIter, E>;
|
||||
|
||||
fn into_deserializer(self) -> Self::Deserializer {
|
||||
MapDeserializer::new(self.into_iter())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> Serialize for IndexSet<T, S>
|
||||
where
|
||||
T: Serialize,
|
||||
{
|
||||
fn serialize<Se>(&self, serializer: Se) -> Result<Se::Ok, Se::Error>
|
||||
where
|
||||
Se: Serializer,
|
||||
{
|
||||
serializer.collect_seq(self)
|
||||
}
|
||||
}
|
||||
|
||||
struct IndexSetVisitor<T, S>(PhantomData<(T, S)>);
|
||||
|
||||
impl<'de, T, S> Visitor<'de> for IndexSetVisitor<T, S>
|
||||
where
|
||||
T: Deserialize<'de> + Eq + Hash,
|
||||
S: Default + BuildHasher,
|
||||
{
|
||||
type Value = IndexSet<T, S>;
|
||||
|
||||
fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
|
||||
write!(formatter, "a set")
|
||||
}
|
||||
|
||||
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
|
||||
where
|
||||
A: SeqAccess<'de>,
|
||||
{
|
||||
let capacity = cautious_capacity::<T, ()>(seq.size_hint());
|
||||
let mut values = IndexSet::with_capacity_and_hasher(capacity, S::default());
|
||||
|
||||
while let Some(value) = seq.next_element()? {
|
||||
values.insert(value);
|
||||
}
|
||||
|
||||
Ok(values)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, T, S> Deserialize<'de> for IndexSet<T, S>
|
||||
where
|
||||
T: Deserialize<'de> + Eq + Hash,
|
||||
S: Default + BuildHasher,
|
||||
{
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
deserializer.deserialize_seq(IndexSetVisitor(PhantomData))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, T, S, E> IntoDeserializer<'de, E> for IndexSet<T, S>
|
||||
where
|
||||
T: IntoDeserializer<'de, E> + Eq + Hash,
|
||||
S: BuildHasher,
|
||||
E: Error,
|
||||
{
|
||||
type Deserializer = SeqDeserializer<<Self as IntoIterator>::IntoIter, E>;
|
||||
|
||||
fn into_deserializer(self) -> Self::Deserializer {
|
||||
SeqDeserializer::new(self.into_iter())
|
||||
}
|
||||
}
|
||||
1167
third-party/vendor/indexmap/src/set.rs
vendored
Normal file
1167
third-party/vendor/indexmap/src/set.rs
vendored
Normal file
File diff suppressed because it is too large
Load diff
626
third-party/vendor/indexmap/src/set/iter.rs
vendored
Normal file
626
third-party/vendor/indexmap/src/set/iter.rs
vendored
Normal file
|
|
@ -0,0 +1,626 @@
|
|||
use super::{Bucket, Entries, IndexSet, Slice};
|
||||
|
||||
use alloc::vec::{self, Vec};
|
||||
use core::fmt;
|
||||
use core::hash::{BuildHasher, Hash};
|
||||
use core::iter::{Chain, FusedIterator};
|
||||
use core::ops::RangeBounds;
|
||||
use core::slice::Iter as SliceIter;
|
||||
|
||||
impl<'a, T, S> IntoIterator for &'a IndexSet<T, S> {
|
||||
type Item = &'a T;
|
||||
type IntoIter = Iter<'a, T>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
self.iter()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> IntoIterator for IndexSet<T, S> {
|
||||
type Item = T;
|
||||
type IntoIter = IntoIter<T>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
IntoIter::new(self.into_entries())
|
||||
}
|
||||
}
|
||||
|
||||
/// An iterator over the items of an [`IndexSet`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::iter`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Iter<'a, T> {
|
||||
iter: SliceIter<'a, Bucket<T>>,
|
||||
}
|
||||
|
||||
impl<'a, T> Iter<'a, T> {
|
||||
pub(super) fn new(entries: &'a [Bucket<T>]) -> Self {
|
||||
Self {
|
||||
iter: entries.iter(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a slice of the remaining entries in the iterator.
|
||||
pub fn as_slice(&self) -> &'a Slice<T> {
|
||||
Slice::from_slice(self.iter.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> Iterator for Iter<'a, T> {
|
||||
type Item = &'a T;
|
||||
|
||||
iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
impl<T> DoubleEndedIterator for Iter<'_, T> {
|
||||
double_ended_iterator_methods!(Bucket::key_ref);
|
||||
}
|
||||
|
||||
impl<T> ExactSizeIterator for Iter<'_, T> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> FusedIterator for Iter<'_, T> {}
|
||||
|
||||
impl<T> Clone for Iter<'_, T> {
|
||||
fn clone(&self) -> Self {
|
||||
Iter {
|
||||
iter: self.iter.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for Iter<'_, T> {
|
||||
fn default() -> Self {
|
||||
Self { iter: [].iter() }
|
||||
}
|
||||
}
|
||||
|
||||
/// An owning iterator over the items of an [`IndexSet`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::into_iter`] method
|
||||
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
|
||||
pub struct IntoIter<T> {
|
||||
iter: vec::IntoIter<Bucket<T>>,
|
||||
}
|
||||
|
||||
impl<T> IntoIter<T> {
|
||||
pub(super) fn new(entries: Vec<Bucket<T>>) -> Self {
|
||||
Self {
|
||||
iter: entries.into_iter(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a slice of the remaining entries in the iterator.
|
||||
pub fn as_slice(&self) -> &Slice<T> {
|
||||
Slice::from_slice(self.iter.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Iterator for IntoIter<T> {
|
||||
type Item = T;
|
||||
|
||||
iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<T> DoubleEndedIterator for IntoIter<T> {
|
||||
double_ended_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<T> ExactSizeIterator for IntoIter<T> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> FusedIterator for IntoIter<T> {}
|
||||
|
||||
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for IntoIter<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
iter: Vec::new().into_iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A draining iterator over the items of an [`IndexSet`].
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::drain`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Drain<'a, T> {
|
||||
iter: vec::Drain<'a, Bucket<T>>,
|
||||
}
|
||||
|
||||
impl<'a, T> Drain<'a, T> {
|
||||
pub(super) fn new(iter: vec::Drain<'a, Bucket<T>>) -> Self {
|
||||
Self { iter }
|
||||
}
|
||||
|
||||
/// Returns a slice of the remaining entries in the iterator.
|
||||
pub fn as_slice(&self) -> &Slice<T> {
|
||||
Slice::from_slice(self.iter.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Iterator for Drain<'_, T> {
|
||||
type Item = T;
|
||||
|
||||
iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<T> DoubleEndedIterator for Drain<'_, T> {
|
||||
double_ended_iterator_methods!(Bucket::key);
|
||||
}
|
||||
|
||||
impl<T> ExactSizeIterator for Drain<'_, T> {
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> FusedIterator for Drain<'_, T> {}
|
||||
|
||||
impl<T: fmt::Debug> fmt::Debug for Drain<'_, T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
|
||||
f.debug_list().entries(iter).finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A lazy iterator producing elements in the difference of [`IndexSet`]s.
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::difference`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Difference<'a, T, S> {
|
||||
iter: Iter<'a, T>,
|
||||
other: &'a IndexSet<T, S>,
|
||||
}
|
||||
|
||||
impl<'a, T, S> Difference<'a, T, S> {
|
||||
pub(super) fn new<S1>(set: &'a IndexSet<T, S1>, other: &'a IndexSet<T, S>) -> Self {
|
||||
Self {
|
||||
iter: set.iter(),
|
||||
other,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, S> Iterator for Difference<'a, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
type Item = &'a T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
while let Some(item) = self.iter.next() {
|
||||
if !self.other.contains(item) {
|
||||
return Some(item);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
(0, self.iter.size_hint().1)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> DoubleEndedIterator for Difference<'_, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn next_back(&mut self) -> Option<Self::Item> {
|
||||
while let Some(item) = self.iter.next_back() {
|
||||
if !self.other.contains(item) {
|
||||
return Some(item);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> FusedIterator for Difference<'_, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T, S> Clone for Difference<'_, T, S> {
|
||||
fn clone(&self) -> Self {
|
||||
Difference {
|
||||
iter: self.iter.clone(),
|
||||
..*self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> fmt::Debug for Difference<'_, T, S>
|
||||
where
|
||||
T: fmt::Debug + Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A lazy iterator producing elements in the intersection of [`IndexSet`]s.
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::intersection`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Intersection<'a, T, S> {
|
||||
iter: Iter<'a, T>,
|
||||
other: &'a IndexSet<T, S>,
|
||||
}
|
||||
|
||||
impl<'a, T, S> Intersection<'a, T, S> {
|
||||
pub(super) fn new<S1>(set: &'a IndexSet<T, S1>, other: &'a IndexSet<T, S>) -> Self {
|
||||
Self {
|
||||
iter: set.iter(),
|
||||
other,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, S> Iterator for Intersection<'a, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
type Item = &'a T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
while let Some(item) = self.iter.next() {
|
||||
if self.other.contains(item) {
|
||||
return Some(item);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
(0, self.iter.size_hint().1)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> DoubleEndedIterator for Intersection<'_, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn next_back(&mut self) -> Option<Self::Item> {
|
||||
while let Some(item) = self.iter.next_back() {
|
||||
if self.other.contains(item) {
|
||||
return Some(item);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> FusedIterator for Intersection<'_, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T, S> Clone for Intersection<'_, T, S> {
|
||||
fn clone(&self) -> Self {
|
||||
Intersection {
|
||||
iter: self.iter.clone(),
|
||||
..*self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> fmt::Debug for Intersection<'_, T, S>
|
||||
where
|
||||
T: fmt::Debug + Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A lazy iterator producing elements in the symmetric difference of [`IndexSet`]s.
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::symmetric_difference`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct SymmetricDifference<'a, T, S1, S2> {
|
||||
iter: Chain<Difference<'a, T, S2>, Difference<'a, T, S1>>,
|
||||
}
|
||||
|
||||
impl<'a, T, S1, S2> SymmetricDifference<'a, T, S1, S2>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
pub(super) fn new(set1: &'a IndexSet<T, S1>, set2: &'a IndexSet<T, S2>) -> Self {
|
||||
let diff1 = set1.difference(set2);
|
||||
let diff2 = set2.difference(set1);
|
||||
Self {
|
||||
iter: diff1.chain(diff2),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
type Item = &'a T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.iter.next()
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
self.iter.size_hint()
|
||||
}
|
||||
|
||||
fn fold<B, F>(self, init: B, f: F) -> B
|
||||
where
|
||||
F: FnMut(B, Self::Item) -> B,
|
||||
{
|
||||
self.iter.fold(init, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S1, S2> DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
fn next_back(&mut self) -> Option<Self::Item> {
|
||||
self.iter.next_back()
|
||||
}
|
||||
|
||||
fn rfold<B, F>(self, init: B, f: F) -> B
|
||||
where
|
||||
F: FnMut(B, Self::Item) -> B,
|
||||
{
|
||||
self.iter.rfold(init, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S1, S2> FusedIterator for SymmetricDifference<'_, T, S1, S2>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T, S1, S2> Clone for SymmetricDifference<'_, T, S1, S2> {
|
||||
fn clone(&self) -> Self {
|
||||
SymmetricDifference {
|
||||
iter: self.iter.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S1, S2> fmt::Debug for SymmetricDifference<'_, T, S1, S2>
|
||||
where
|
||||
T: fmt::Debug + Eq + Hash,
|
||||
S1: BuildHasher,
|
||||
S2: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A lazy iterator producing elements in the union of [`IndexSet`]s.
|
||||
///
|
||||
/// This `struct` is created by the [`IndexSet::union`] method.
|
||||
/// See its documentation for more.
|
||||
pub struct Union<'a, T, S> {
|
||||
iter: Chain<Iter<'a, T>, Difference<'a, T, S>>,
|
||||
}
|
||||
|
||||
impl<'a, T, S> Union<'a, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
pub(super) fn new<S2>(set1: &'a IndexSet<T, S>, set2: &'a IndexSet<T, S2>) -> Self
|
||||
where
|
||||
S2: BuildHasher,
|
||||
{
|
||||
Self {
|
||||
iter: set1.iter().chain(set2.difference(set1)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, S> Iterator for Union<'a, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
type Item = &'a T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.iter.next()
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
self.iter.size_hint()
|
||||
}
|
||||
|
||||
fn fold<B, F>(self, init: B, f: F) -> B
|
||||
where
|
||||
F: FnMut(B, Self::Item) -> B,
|
||||
{
|
||||
self.iter.fold(init, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> DoubleEndedIterator for Union<'_, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn next_back(&mut self) -> Option<Self::Item> {
|
||||
self.iter.next_back()
|
||||
}
|
||||
|
||||
fn rfold<B, F>(self, init: B, f: F) -> B
|
||||
where
|
||||
F: FnMut(B, Self::Item) -> B,
|
||||
{
|
||||
self.iter.rfold(init, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> FusedIterator for Union<'_, T, S>
|
||||
where
|
||||
T: Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T, S> Clone for Union<'_, T, S> {
|
||||
fn clone(&self) -> Self {
|
||||
Union {
|
||||
iter: self.iter.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S> fmt::Debug for Union<'_, T, S>
|
||||
where
|
||||
T: fmt::Debug + Eq + Hash,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A splicing iterator for `IndexSet`.
|
||||
///
|
||||
/// This `struct` is created by [`IndexSet::splice()`].
|
||||
/// See its documentation for more.
|
||||
pub struct Splice<'a, I, T, S>
|
||||
where
|
||||
I: Iterator<Item = T>,
|
||||
T: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
iter: crate::map::Splice<'a, UnitValue<I>, T, (), S>,
|
||||
}
|
||||
|
||||
impl<'a, I, T, S> Splice<'a, I, T, S>
|
||||
where
|
||||
I: Iterator<Item = T>,
|
||||
T: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
pub(super) fn new<R>(set: &'a mut IndexSet<T, S>, range: R, replace_with: I) -> Self
|
||||
where
|
||||
R: RangeBounds<usize>,
|
||||
{
|
||||
Self {
|
||||
iter: set.map.splice(range, UnitValue(replace_with)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, T, S> Iterator for Splice<'_, I, T, S>
|
||||
where
|
||||
I: Iterator<Item = T>,
|
||||
T: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
type Item = T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
Some(self.iter.next()?.0)
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
self.iter.size_hint()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, T, S> DoubleEndedIterator for Splice<'_, I, T, S>
|
||||
where
|
||||
I: Iterator<Item = T>,
|
||||
T: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn next_back(&mut self) -> Option<Self::Item> {
|
||||
Some(self.iter.next_back()?.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, T, S> ExactSizeIterator for Splice<'_, I, T, S>
|
||||
where
|
||||
I: Iterator<Item = T>,
|
||||
T: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.iter.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, T, S> FusedIterator for Splice<'_, I, T, S>
|
||||
where
|
||||
I: Iterator<Item = T>,
|
||||
T: Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
}
|
||||
|
||||
struct UnitValue<I>(I);
|
||||
|
||||
impl<I: Iterator> Iterator for UnitValue<I> {
|
||||
type Item = (I::Item, ());
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next().map(|x| (x, ()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, I, T, S> fmt::Debug for Splice<'a, I, T, S>
|
||||
where
|
||||
I: fmt::Debug + Iterator<Item = T>,
|
||||
T: fmt::Debug + Hash + Eq,
|
||||
S: BuildHasher,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&self.iter, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: fmt::Debug> fmt::Debug for UnitValue<I> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Debug::fmt(&self.0, f)
|
||||
}
|
||||
}
|
||||
341
third-party/vendor/indexmap/src/set/slice.rs
vendored
Normal file
341
third-party/vendor/indexmap/src/set/slice.rs
vendored
Normal file
|
|
@ -0,0 +1,341 @@
|
|||
use super::{Bucket, Entries, IndexSet, IntoIter, Iter};
|
||||
use crate::util::try_simplify_range;
|
||||
|
||||
use alloc::boxed::Box;
|
||||
use alloc::vec::Vec;
|
||||
use core::cmp::Ordering;
|
||||
use core::fmt;
|
||||
use core::hash::{Hash, Hasher};
|
||||
use core::ops::{self, Bound, Index, RangeBounds};
|
||||
|
||||
/// A dynamically-sized slice of values in an [`IndexSet`].
///
/// This supports indexed operations much like a `[T]` slice,
/// but not any hashed operations on the values.
///
/// Unlike `IndexSet`, `Slice` does consider the order for [`PartialEq`]
/// and [`Eq`], and it also implements [`PartialOrd`], [`Ord`], and [`Hash`].
#[repr(transparent)]
pub struct Slice<T> {
    pub(crate) entries: [Bucket<T>],
}

// SAFETY: `Slice<T>` is a transparent wrapper around `[Bucket<T>]`,
// and reference lifetimes are bound together in function signatures.
#[allow(unsafe_code)]
impl<T> Slice<T> {
    // Reinterpret a borrowed entry slice as a `&Slice`; sound because of
    // `#[repr(transparent)]` above — the layouts are identical.
    pub(super) const fn from_slice(entries: &[Bucket<T>]) -> &Self {
        unsafe { &*(entries as *const [Bucket<T>] as *const Self) }
    }

    // Reinterpret an owned boxed entry slice as a `Box<Slice>`, transferring
    // ownership of the allocation via raw pointers (no copy).
    pub(super) fn from_boxed(entries: Box<[Bucket<T>]>) -> Box<Self> {
        unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) }
    }

    // Inverse of `from_boxed`: recover the underlying boxed entry slice.
    fn into_boxed(self: Box<Self>) -> Box<[Bucket<T>]> {
        unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket<T>]) }
    }
}
|
||||
|
||||
impl<T> Slice<T> {
|
||||
pub(crate) fn into_entries(self: Box<Self>) -> Vec<Bucket<T>> {
|
||||
self.into_boxed().into_vec()
|
||||
}
|
||||
|
||||
/// Returns an empty slice.
|
||||
pub const fn new<'a>() -> &'a Self {
|
||||
Self::from_slice(&[])
|
||||
}
|
||||
|
||||
/// Return the number of elements in the set slice.
|
||||
pub const fn len(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
/// Returns true if the set slice contains no elements.
|
||||
pub const fn is_empty(&self) -> bool {
|
||||
self.entries.is_empty()
|
||||
}
|
||||
|
||||
/// Get a value by index.
|
||||
///
|
||||
/// Valid indices are *0 <= index < self.len()*
|
||||
pub fn get_index(&self, index: usize) -> Option<&T> {
|
||||
self.entries.get(index).map(Bucket::key_ref)
|
||||
}
|
||||
|
||||
/// Returns a slice of values in the given range of indices.
|
||||
///
|
||||
/// Valid indices are *0 <= index < self.len()*
|
||||
pub fn get_range<R: RangeBounds<usize>>(&self, range: R) -> Option<&Self> {
|
||||
let range = try_simplify_range(range, self.entries.len())?;
|
||||
self.entries.get(range).map(Self::from_slice)
|
||||
}
|
||||
|
||||
/// Get the first value.
|
||||
pub fn first(&self) -> Option<&T> {
|
||||
self.entries.first().map(Bucket::key_ref)
|
||||
}
|
||||
|
||||
/// Get the last value.
|
||||
pub fn last(&self) -> Option<&T> {
|
||||
self.entries.last().map(Bucket::key_ref)
|
||||
}
|
||||
|
||||
/// Divides one slice into two at an index.
|
||||
///
|
||||
/// ***Panics*** if `index > len`.
|
||||
pub fn split_at(&self, index: usize) -> (&Self, &Self) {
|
||||
let (first, second) = self.entries.split_at(index);
|
||||
(Self::from_slice(first), Self::from_slice(second))
|
||||
}
|
||||
|
||||
/// Returns the first value and the rest of the slice,
|
||||
/// or `None` if it is empty.
|
||||
pub fn split_first(&self) -> Option<(&T, &Self)> {
|
||||
if let [first, rest @ ..] = &self.entries {
|
||||
Some((&first.key, Self::from_slice(rest)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the last value and the rest of the slice,
|
||||
/// or `None` if it is empty.
|
||||
pub fn split_last(&self) -> Option<(&T, &Self)> {
|
||||
if let [rest @ .., last] = &self.entries {
|
||||
Some((&last.key, Self::from_slice(rest)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Return an iterator over the values of the set slice.
|
||||
pub fn iter(&self) -> Iter<'_, T> {
|
||||
Iter::new(&self.entries)
|
||||
}
|
||||
|
||||
/// Search over a sorted set for a value.
|
||||
///
|
||||
/// Returns the position where that value is present, or the position where it can be inserted
|
||||
/// to maintain the sort. See [`slice::binary_search`] for more details.
|
||||
///
|
||||
/// Computes in **O(log(n))** time, which is notably less scalable than looking the value up in
|
||||
/// the set this is a slice from using [`IndexSet::get_index_of`], but this can also position
|
||||
/// missing values.
|
||||
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
|
||||
where
|
||||
T: Ord,
|
||||
{
|
||||
self.binary_search_by(|p| p.cmp(x))
|
||||
}
|
||||
|
||||
/// Search over a sorted set with a comparator function.
|
||||
///
|
||||
/// Returns the position where that value is present, or the position where it can be inserted
|
||||
/// to maintain the sort. See [`slice::binary_search_by`] for more details.
|
||||
///
|
||||
/// Computes in **O(log(n))** time.
|
||||
#[inline]
|
||||
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
|
||||
where
|
||||
F: FnMut(&'a T) -> Ordering,
|
||||
{
|
||||
self.entries.binary_search_by(move |a| f(&a.key))
|
||||
}
|
||||
|
||||
/// Search over a sorted set with an extraction function.
|
||||
///
|
||||
/// Returns the position where that value is present, or the position where it can be inserted
|
||||
/// to maintain the sort. See [`slice::binary_search_by_key`] for more details.
|
||||
///
|
||||
/// Computes in **O(log(n))** time.
|
||||
#[inline]
|
||||
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
|
||||
where
|
||||
F: FnMut(&'a T) -> B,
|
||||
B: Ord,
|
||||
{
|
||||
self.binary_search_by(|k| f(k).cmp(b))
|
||||
}
|
||||
|
||||
/// Returns the index of the partition point of a sorted set according to the given predicate
|
||||
/// (the index of the first element of the second partition).
|
||||
///
|
||||
/// See [`slice::partition_point`] for more details.
|
||||
///
|
||||
/// Computes in **O(log(n))** time.
|
||||
#[must_use]
|
||||
pub fn partition_point<P>(&self, mut pred: P) -> usize
|
||||
where
|
||||
P: FnMut(&T) -> bool,
|
||||
{
|
||||
self.entries.partition_point(move |a| pred(&a.key))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> IntoIterator for &'a Slice<T> {
|
||||
type IntoIter = Iter<'a, T>;
|
||||
type Item = &'a T;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
self.iter()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> IntoIterator for Box<Slice<T>> {
|
||||
type IntoIter = IntoIter<T>;
|
||||
type Item = T;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
IntoIter::new(self.into_entries())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for &'_ Slice<T> {
|
||||
fn default() -> Self {
|
||||
Slice::from_slice(&[])
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for Box<Slice<T>> {
|
||||
fn default() -> Self {
|
||||
Slice::from_boxed(Box::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> Clone for Box<Slice<T>> {
|
||||
fn clone(&self) -> Self {
|
||||
Slice::from_boxed(self.entries.to_vec().into_boxed_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy> From<&Slice<T>> for Box<Slice<T>> {
|
||||
fn from(slice: &Slice<T>) -> Self {
|
||||
Slice::from_boxed(Box::from(&slice.entries))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: fmt::Debug> fmt::Debug for Slice<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: PartialEq> PartialEq for Slice<T> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.len() == other.len() && self.iter().eq(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Eq> Eq for Slice<T> {}
|
||||
|
||||
impl<T: PartialOrd> PartialOrd for Slice<T> {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
self.iter().partial_cmp(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Ord> Ord for Slice<T> {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
self.iter().cmp(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Hash> Hash for Slice<T> {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.len().hash(state);
|
||||
for value in self {
|
||||
value.hash(state);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Index<usize> for Slice<T> {
    type Output = T;

    // Panics if `index` is out of bounds, matching standard slice indexing.
    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index].key
    }
}

// We can't have `impl<I: RangeBounds<usize>> Index<I>` because that conflicts with `Index<usize>`.
// Instead, we repeat the implementations for all the core range types.
macro_rules! impl_index {
    ($($range:ty),*) => {$(
        // Range-indexing an `IndexSet` yields a borrowed value `Slice`.
        impl<T, S> Index<$range> for IndexSet<T, S> {
            type Output = Slice<T>;

            fn index(&self, range: $range) -> &Self::Output {
                Slice::from_slice(&self.as_entries()[range])
            }
        }

        // Range-indexing a `Slice` yields a sub-`Slice` of the same kind.
        impl<T> Index<$range> for Slice<T> {
            type Output = Self;

            fn index(&self, range: $range) -> &Self::Output {
                Slice::from_slice(&self.entries[range])
            }
        }
    )*}
}
impl_index!(
    ops::Range<usize>,
    ops::RangeFrom<usize>,
    ops::RangeFull,
    ops::RangeInclusive<usize>,
    ops::RangeTo<usize>,
    ops::RangeToInclusive<usize>,
    (Bound<usize>, Bound<usize>)
);
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::vec::Vec;

    #[test]
    fn slice_index() {
        // Verifies that every supported range type indexes a set/slice the same
        // way it indexes a plain Vec, and that `&set[range]` and `&slice[range]`
        // refer to the very same underlying slice object (pointer equality).
        fn check(vec_slice: &[i32], set_slice: &Slice<i32>, sub_slice: &Slice<i32>) {
            assert_eq!(set_slice as *const _, sub_slice as *const _);
            itertools::assert_equal(vec_slice, set_slice);
        }

        // Distinct, strictly increasing values so set order matches vec order.
        let vec: Vec<i32> = (0..10).map(|i| i * i).collect();
        let set: IndexSet<i32> = vec.iter().cloned().collect();
        let slice = set.as_slice();

        // RangeFull
        check(&vec[..], &set[..], &slice[..]);

        for i in 0usize..10 {
            // Index
            assert_eq!(vec[i], set[i]);
            assert_eq!(vec[i], slice[i]);

            // RangeFrom
            check(&vec[i..], &set[i..], &slice[i..]);

            // RangeTo
            check(&vec[..i], &set[..i], &slice[..i]);

            // RangeToInclusive
            check(&vec[..=i], &set[..=i], &slice[..=i]);

            // (Bound<usize>, Bound<usize>)
            let bounds = (Bound::Excluded(i), Bound::Unbounded);
            check(&vec[i + 1..], &set[bounds], &slice[bounds]);

            for j in i..=10 {
                // Range
                check(&vec[i..j], &set[i..j], &slice[i..j]);
            }

            for j in i..10 {
                // RangeInclusive
                check(&vec[i..=j], &set[i..=j], &slice[i..=j]);
            }
        }
    }
}
|
||||
723
third-party/vendor/indexmap/src/set/tests.rs
vendored
Normal file
723
third-party/vendor/indexmap/src/set/tests.rs
vendored
Normal file
|
|
@ -0,0 +1,723 @@
|
|||
use super::*;
|
||||
use std::string::String;
|
||||
|
||||
#[test]
fn it_works() {
    // Inserting the same value twice must keep a single entry.
    let mut set = IndexSet::new();
    assert_eq!(set.is_empty(), true);
    for _ in 0..2 {
        set.insert(1);
    }
    assert_eq!(set.len(), 1);
    assert!(set.get(&1).is_some());
    assert_eq!(set.is_empty(), false);
}
|
||||
|
||||
#[test]
fn new() {
    // A freshly created set allocates nothing and holds nothing.
    let set: IndexSet<String> = IndexSet::new();
    println!("{:?}", set);
    assert_eq!(set.capacity(), 0);
    assert_eq!(set.len(), 0);
    assert_eq!(set.is_empty(), true);
}
|
||||
|
||||
#[test]
fn insert() {
    // Distinct values grow the set one at a time and stay retrievable;
    // values never inserted must not be found.
    let present = [0, 4, 2, 12, 8, 7, 11, 5];
    let absent = [1, 3, 6, 9, 10];
    let mut set = IndexSet::with_capacity(present.len());

    let mut expected_len = 0;
    for &value in &present {
        assert_eq!(set.len(), expected_len);
        set.insert(value);
        expected_len += 1;
        assert_eq!(set.len(), expected_len);
        assert_eq!(set.get(&value), Some(&value));
    }
    println!("{:?}", set);

    for value in &absent {
        assert!(set.get(value).is_none());
    }
}
|
||||
|
||||
#[test]
|
||||
fn insert_full() {
|
||||
let insert = vec![9, 2, 7, 1, 4, 6, 13];
|
||||
let present = vec![1, 6, 2];
|
||||
let mut set = IndexSet::with_capacity(insert.len());
|
||||
|
||||
for (i, &elt) in insert.iter().enumerate() {
|
||||
assert_eq!(set.len(), i);
|
||||
let (index, success) = set.insert_full(elt);
|
||||
assert!(success);
|
||||
assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
|
||||
assert_eq!(set.len(), i + 1);
|
||||
}
|
||||
|
||||
let len = set.len();
|
||||
for &elt in &present {
|
||||
let (index, success) = set.insert_full(elt);
|
||||
assert!(!success);
|
||||
assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
|
||||
assert_eq!(set.len(), len);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insert_2() {
|
||||
let mut set = IndexSet::with_capacity(16);
|
||||
|
||||
let mut values = vec![];
|
||||
values.extend(0..16);
|
||||
values.extend(if cfg!(miri) { 32..64 } else { 128..267 });
|
||||
|
||||
for &i in &values {
|
||||
let old_set = set.clone();
|
||||
set.insert(i);
|
||||
for value in old_set.iter() {
|
||||
if set.get(value).is_none() {
|
||||
println!("old_set: {:?}", old_set);
|
||||
println!("set: {:?}", set);
|
||||
panic!("did not find {} in set", value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for &i in &values {
|
||||
assert!(set.get(&i).is_some(), "did not find {}", i);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn insert_dup() {
    // Re-inserting an existing value is a no-op: same length, same index,
    // same stored value, and `insert` reports `false`.
    let mut elements = vec![0, 2, 4, 6, 8];
    let mut set: IndexSet<u8> = elements.drain(..).collect();

    // Shared check: the value 0 sits at index 0 and the set has 5 entries.
    let check_first = |set: &IndexSet<u8>| {
        let (index, value) = set.get_full(&0).unwrap();
        assert_eq!(set.len(), 5);
        assert_eq!(index, 0);
        assert_eq!(*value, 0);
    };

    check_first(&set);
    let inserted = set.insert(0);
    assert_eq!(inserted, false);
    check_first(&set);
}
|
||||
|
||||
#[test]
|
||||
fn insert_order() {
|
||||
let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
|
||||
let mut set = IndexSet::new();
|
||||
|
||||
for &elt in &insert {
|
||||
set.insert(elt);
|
||||
}
|
||||
|
||||
assert_eq!(set.iter().count(), set.len());
|
||||
assert_eq!(set.iter().count(), insert.len());
|
||||
for (a, b) in insert.iter().zip(set.iter()) {
|
||||
assert_eq!(a, b);
|
||||
}
|
||||
for (i, v) in (0..insert.len()).zip(set.iter()) {
|
||||
assert_eq!(set.get_index(i).unwrap(), v);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn shift_insert() {
|
||||
let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
|
||||
let mut set = IndexSet::new();
|
||||
|
||||
for &elt in &insert {
|
||||
set.shift_insert(0, elt);
|
||||
}
|
||||
|
||||
assert_eq!(set.iter().count(), set.len());
|
||||
assert_eq!(set.iter().count(), insert.len());
|
||||
for (a, b) in insert.iter().rev().zip(set.iter()) {
|
||||
assert_eq!(a, b);
|
||||
}
|
||||
for (i, v) in (0..insert.len()).zip(set.iter()) {
|
||||
assert_eq!(set.get_index(i).unwrap(), v);
|
||||
}
|
||||
|
||||
// "insert" that moves an existing entry
|
||||
set.shift_insert(0, insert[0]);
|
||||
assert_eq!(set.iter().count(), insert.len());
|
||||
assert_eq!(insert[0], set[0]);
|
||||
for (a, b) in insert[1..].iter().rev().zip(set.iter().skip(1)) {
|
||||
assert_eq!(a, b);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replace() {
|
||||
let replace = [0, 4, 2, 12, 8, 7, 11, 5];
|
||||
let not_present = [1, 3, 6, 9, 10];
|
||||
let mut set = IndexSet::with_capacity(replace.len());
|
||||
|
||||
for (i, &elt) in replace.iter().enumerate() {
|
||||
assert_eq!(set.len(), i);
|
||||
set.replace(elt);
|
||||
assert_eq!(set.len(), i + 1);
|
||||
assert_eq!(set.get(&elt), Some(&elt));
|
||||
}
|
||||
println!("{:?}", set);
|
||||
|
||||
for &elt in ¬_present {
|
||||
assert!(set.get(&elt).is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replace_full() {
|
||||
let replace = vec![9, 2, 7, 1, 4, 6, 13];
|
||||
let present = vec![1, 6, 2];
|
||||
let mut set = IndexSet::with_capacity(replace.len());
|
||||
|
||||
for (i, &elt) in replace.iter().enumerate() {
|
||||
assert_eq!(set.len(), i);
|
||||
let (index, replaced) = set.replace_full(elt);
|
||||
assert!(replaced.is_none());
|
||||
assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
|
||||
assert_eq!(set.len(), i + 1);
|
||||
}
|
||||
|
||||
let len = set.len();
|
||||
for &elt in &present {
|
||||
let (index, replaced) = set.replace_full(elt);
|
||||
assert_eq!(Some(elt), replaced);
|
||||
assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
|
||||
assert_eq!(set.len(), len);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replace_2() {
|
||||
let mut set = IndexSet::with_capacity(16);
|
||||
|
||||
let mut values = vec![];
|
||||
values.extend(0..16);
|
||||
values.extend(if cfg!(miri) { 32..64 } else { 128..267 });
|
||||
|
||||
for &i in &values {
|
||||
let old_set = set.clone();
|
||||
set.replace(i);
|
||||
for value in old_set.iter() {
|
||||
if set.get(value).is_none() {
|
||||
println!("old_set: {:?}", old_set);
|
||||
println!("set: {:?}", set);
|
||||
panic!("did not find {} in set", value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for &i in &values {
|
||||
assert!(set.get(&i).is_some(), "did not find {}", i);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replace_dup() {
|
||||
let mut elements = vec![0, 2, 4, 6, 8];
|
||||
let mut set: IndexSet<u8> = elements.drain(..).collect();
|
||||
{
|
||||
let (i, v) = set.get_full(&0).unwrap();
|
||||
assert_eq!(set.len(), 5);
|
||||
assert_eq!(i, 0);
|
||||
assert_eq!(*v, 0);
|
||||
}
|
||||
{
|
||||
let replaced = set.replace(0);
|
||||
let (i, v) = set.get_full(&0).unwrap();
|
||||
assert_eq!(set.len(), 5);
|
||||
assert_eq!(replaced, Some(0));
|
||||
assert_eq!(i, 0);
|
||||
assert_eq!(*v, 0);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn replace_order() {
|
||||
let replace = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
|
||||
let mut set = IndexSet::new();
|
||||
|
||||
for &elt in &replace {
|
||||
set.replace(elt);
|
||||
}
|
||||
|
||||
assert_eq!(set.iter().count(), set.len());
|
||||
assert_eq!(set.iter().count(), replace.len());
|
||||
for (a, b) in replace.iter().zip(set.iter()) {
|
||||
assert_eq!(a, b);
|
||||
}
|
||||
for (i, v) in (0..replace.len()).zip(set.iter()) {
|
||||
assert_eq!(set.get_index(i).unwrap(), v);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn replace_change() {
    // Check pointers to make sure it really changes
    let mut set = indexset!(vec![42]);
    let old_ptr = set[0].as_ptr();
    // Clone the stored Vec: equal value, but a distinct heap allocation.
    let new = set[0].clone();
    let new_ptr = new.as_ptr();
    assert_ne!(old_ptr, new_ptr);
    // `replace` must install the new allocation and hand back the old one.
    let replaced = set.replace(new).unwrap();
    assert_eq!(replaced.as_ptr(), old_ptr);
}
|
||||
|
||||
#[test]
|
||||
fn grow() {
|
||||
let insert = [0, 4, 2, 12, 8, 7, 11];
|
||||
let not_present = [1, 3, 6, 9, 10];
|
||||
let mut set = IndexSet::with_capacity(insert.len());
|
||||
|
||||
for (i, &elt) in insert.iter().enumerate() {
|
||||
assert_eq!(set.len(), i);
|
||||
set.insert(elt);
|
||||
assert_eq!(set.len(), i + 1);
|
||||
assert_eq!(set.get(&elt), Some(&elt));
|
||||
}
|
||||
|
||||
println!("{:?}", set);
|
||||
for &elt in &insert {
|
||||
set.insert(elt * 10);
|
||||
}
|
||||
for &elt in &insert {
|
||||
set.insert(elt * 100);
|
||||
}
|
||||
for (i, &elt) in insert.iter().cycle().enumerate().take(100) {
|
||||
set.insert(elt * 100 + i as i32);
|
||||
}
|
||||
println!("{:?}", set);
|
||||
for &elt in ¬_present {
|
||||
assert!(set.get(&elt).is_none());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reserve() {
|
||||
let mut set = IndexSet::<usize>::new();
|
||||
assert_eq!(set.capacity(), 0);
|
||||
set.reserve(100);
|
||||
let capacity = set.capacity();
|
||||
assert!(capacity >= 100);
|
||||
for i in 0..capacity {
|
||||
assert_eq!(set.len(), i);
|
||||
set.insert(i);
|
||||
assert_eq!(set.len(), i + 1);
|
||||
assert_eq!(set.capacity(), capacity);
|
||||
assert_eq!(set.get(&i), Some(&i));
|
||||
}
|
||||
set.insert(capacity);
|
||||
assert_eq!(set.len(), capacity + 1);
|
||||
assert!(set.capacity() > capacity);
|
||||
assert_eq!(set.get(&capacity), Some(&capacity));
|
||||
}
|
||||
|
||||
#[test]
fn try_reserve() {
    // A reasonable reservation succeeds; an absurd one reports an error
    // instead of aborting the process.
    let mut set: IndexSet<usize> = IndexSet::new();
    assert_eq!(set.capacity(), 0);
    assert_eq!(set.try_reserve(100), Ok(()));
    assert!(set.capacity() >= 100);
    assert!(set.try_reserve(usize::MAX).is_err());
}
|
||||
|
||||
#[test]
|
||||
fn shrink_to_fit() {
|
||||
let mut set = IndexSet::<usize>::new();
|
||||
assert_eq!(set.capacity(), 0);
|
||||
for i in 0..100 {
|
||||
assert_eq!(set.len(), i);
|
||||
set.insert(i);
|
||||
assert_eq!(set.len(), i + 1);
|
||||
assert!(set.capacity() >= i + 1);
|
||||
assert_eq!(set.get(&i), Some(&i));
|
||||
set.shrink_to_fit();
|
||||
assert_eq!(set.len(), i + 1);
|
||||
assert_eq!(set.capacity(), i + 1);
|
||||
assert_eq!(set.get(&i), Some(&i));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn remove() {
|
||||
let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
|
||||
let mut set = IndexSet::new();
|
||||
|
||||
for &elt in &insert {
|
||||
set.insert(elt);
|
||||
}
|
||||
|
||||
assert_eq!(set.iter().count(), set.len());
|
||||
assert_eq!(set.iter().count(), insert.len());
|
||||
for (a, b) in insert.iter().zip(set.iter()) {
|
||||
assert_eq!(a, b);
|
||||
}
|
||||
|
||||
let remove_fail = [99, 77];
|
||||
let remove = [4, 12, 8, 7];
|
||||
|
||||
for &value in &remove_fail {
|
||||
assert!(set.swap_remove_full(&value).is_none());
|
||||
}
|
||||
println!("{:?}", set);
|
||||
for &value in &remove {
|
||||
//println!("{:?}", set);
|
||||
let index = set.get_full(&value).unwrap().0;
|
||||
assert_eq!(set.swap_remove_full(&value), Some((index, value)));
|
||||
}
|
||||
println!("{:?}", set);
|
||||
|
||||
for value in &insert {
|
||||
assert_eq!(set.get(value).is_some(), !remove.contains(value));
|
||||
}
|
||||
assert_eq!(set.len(), insert.len() - remove.len());
|
||||
assert_eq!(set.iter().count(), insert.len() - remove.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn swap_remove_index() {
|
||||
let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
|
||||
let mut set = IndexSet::new();
|
||||
|
||||
for &elt in &insert {
|
||||
set.insert(elt);
|
||||
}
|
||||
|
||||
let mut vector = insert.to_vec();
|
||||
let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1];
|
||||
|
||||
// check that the same swap remove sequence on vec and set
|
||||
// have the same result.
|
||||
for &rm in remove_sequence {
|
||||
let out_vec = vector.swap_remove(rm);
|
||||
let out_set = set.swap_remove_index(rm).unwrap();
|
||||
assert_eq!(out_vec, out_set);
|
||||
}
|
||||
assert_eq!(vector.len(), set.len());
|
||||
for (a, b) in vector.iter().zip(set.iter()) {
|
||||
assert_eq!(a, b);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn partial_eq_and_eq() {
    let mut set_a = IndexSet::new();
    set_a.insert(1);
    set_a.insert(2);
    let mut set_b = set_a.clone();
    assert_eq!(set_a, set_b);
    // Removing an element breaks equality.
    set_b.swap_remove(&1);
    assert_ne!(set_a, set_b);

    // Rebuilding a set with different contents stays unequal in both
    // directions (PartialEq must be symmetric).
    let set_c: IndexSet<_> = set_b.into_iter().collect();
    assert_ne!(set_a, set_c);
    assert_ne!(set_c, set_a);
}
|
||||
|
||||
#[test]
fn extend() {
    // Both `Extend<&T>` and `Extend<T>` are supported, and extending
    // preserves insertion order.
    let mut set = IndexSet::new();
    set.extend(vec![&1, &2, &3, &4]);
    set.extend(vec![5, 6]);
    let collected: Vec<_> = set.into_iter().collect();
    assert_eq!(collected, vec![1, 2, 3, 4, 5, 6]);
}
|
||||
|
||||
#[test]
|
||||
fn comparisons() {
|
||||
let set_a: IndexSet<_> = (0..3).collect();
|
||||
let set_b: IndexSet<_> = (3..6).collect();
|
||||
let set_c: IndexSet<_> = (0..6).collect();
|
||||
let set_d: IndexSet<_> = (3..9).collect();
|
||||
|
||||
assert!(!set_a.is_disjoint(&set_a));
|
||||
assert!(set_a.is_subset(&set_a));
|
||||
assert!(set_a.is_superset(&set_a));
|
||||
|
||||
assert!(set_a.is_disjoint(&set_b));
|
||||
assert!(set_b.is_disjoint(&set_a));
|
||||
assert!(!set_a.is_subset(&set_b));
|
||||
assert!(!set_b.is_subset(&set_a));
|
||||
assert!(!set_a.is_superset(&set_b));
|
||||
assert!(!set_b.is_superset(&set_a));
|
||||
|
||||
assert!(!set_a.is_disjoint(&set_c));
|
||||
assert!(!set_c.is_disjoint(&set_a));
|
||||
assert!(set_a.is_subset(&set_c));
|
||||
assert!(!set_c.is_subset(&set_a));
|
||||
assert!(!set_a.is_superset(&set_c));
|
||||
assert!(set_c.is_superset(&set_a));
|
||||
|
||||
assert!(!set_c.is_disjoint(&set_d));
|
||||
assert!(!set_d.is_disjoint(&set_c));
|
||||
assert!(!set_c.is_subset(&set_d));
|
||||
assert!(!set_d.is_subset(&set_c));
|
||||
assert!(!set_c.is_superset(&set_d));
|
||||
assert!(!set_d.is_superset(&set_c));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn iter_comparisons() {
|
||||
use std::iter::empty;
|
||||
|
||||
fn check<'a, I1, I2>(iter1: I1, iter2: I2)
|
||||
where
|
||||
I1: Iterator<Item = &'a i32>,
|
||||
I2: Iterator<Item = i32>,
|
||||
{
|
||||
assert!(iter1.copied().eq(iter2));
|
||||
}
|
||||
|
||||
let set_a: IndexSet<_> = (0..3).collect();
|
||||
let set_b: IndexSet<_> = (3..6).collect();
|
||||
let set_c: IndexSet<_> = (0..6).collect();
|
||||
let set_d: IndexSet<_> = (3..9).rev().collect();
|
||||
|
||||
check(set_a.difference(&set_a), empty());
|
||||
check(set_a.symmetric_difference(&set_a), empty());
|
||||
check(set_a.intersection(&set_a), 0..3);
|
||||
check(set_a.union(&set_a), 0..3);
|
||||
|
||||
check(set_a.difference(&set_b), 0..3);
|
||||
check(set_b.difference(&set_a), 3..6);
|
||||
check(set_a.symmetric_difference(&set_b), 0..6);
|
||||
check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3));
|
||||
check(set_a.intersection(&set_b), empty());
|
||||
check(set_b.intersection(&set_a), empty());
|
||||
check(set_a.union(&set_b), 0..6);
|
||||
check(set_b.union(&set_a), (3..6).chain(0..3));
|
||||
|
||||
check(set_a.difference(&set_c), empty());
|
||||
check(set_c.difference(&set_a), 3..6);
|
||||
check(set_a.symmetric_difference(&set_c), 3..6);
|
||||
check(set_c.symmetric_difference(&set_a), 3..6);
|
||||
check(set_a.intersection(&set_c), 0..3);
|
||||
check(set_c.intersection(&set_a), 0..3);
|
||||
check(set_a.union(&set_c), 0..6);
|
||||
check(set_c.union(&set_a), 0..6);
|
||||
|
||||
check(set_c.difference(&set_d), 0..3);
|
||||
check(set_d.difference(&set_c), (6..9).rev());
|
||||
check(
|
||||
set_c.symmetric_difference(&set_d),
|
||||
(0..3).chain((6..9).rev()),
|
||||
);
|
||||
check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3));
|
||||
check(set_c.intersection(&set_d), 3..6);
|
||||
check(set_d.intersection(&set_c), (3..6).rev());
|
||||
check(set_c.union(&set_d), (0..6).chain((6..9).rev()));
|
||||
check(set_d.union(&set_c), (3..9).rev().chain(0..3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ops() {
|
||||
let empty = IndexSet::<i32>::new();
|
||||
let set_a: IndexSet<_> = (0..3).collect();
|
||||
let set_b: IndexSet<_> = (3..6).collect();
|
||||
let set_c: IndexSet<_> = (0..6).collect();
|
||||
let set_d: IndexSet<_> = (3..9).rev().collect();
|
||||
|
||||
#[allow(clippy::eq_op)]
|
||||
{
|
||||
assert_eq!(&set_a & &set_a, set_a);
|
||||
assert_eq!(&set_a | &set_a, set_a);
|
||||
assert_eq!(&set_a ^ &set_a, empty);
|
||||
assert_eq!(&set_a - &set_a, empty);
|
||||
}
|
||||
|
||||
assert_eq!(&set_a & &set_b, empty);
|
||||
assert_eq!(&set_b & &set_a, empty);
|
||||
assert_eq!(&set_a | &set_b, set_c);
|
||||
assert_eq!(&set_b | &set_a, set_c);
|
||||
assert_eq!(&set_a ^ &set_b, set_c);
|
||||
assert_eq!(&set_b ^ &set_a, set_c);
|
||||
assert_eq!(&set_a - &set_b, set_a);
|
||||
assert_eq!(&set_b - &set_a, set_b);
|
||||
|
||||
assert_eq!(&set_a & &set_c, set_a);
|
||||
assert_eq!(&set_c & &set_a, set_a);
|
||||
assert_eq!(&set_a | &set_c, set_c);
|
||||
assert_eq!(&set_c | &set_a, set_c);
|
||||
assert_eq!(&set_a ^ &set_c, set_b);
|
||||
assert_eq!(&set_c ^ &set_a, set_b);
|
||||
assert_eq!(&set_a - &set_c, empty);
|
||||
assert_eq!(&set_c - &set_a, set_b);
|
||||
|
||||
assert_eq!(&set_c & &set_d, set_b);
|
||||
assert_eq!(&set_d & &set_c, set_b);
|
||||
assert_eq!(&set_c | &set_d, &set_a | &set_d);
|
||||
assert_eq!(&set_d | &set_c, &set_a | &set_d);
|
||||
assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b));
|
||||
assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b));
|
||||
assert_eq!(&set_c - &set_d, set_a);
|
||||
assert_eq!(&set_d - &set_c, &set_d - &set_b);
|
||||
}
|
||||
|
||||
#[test]
#[cfg(feature = "std")]
fn from_array() {
    // `From<[T; N]>` and `.into()` must build identical sets.
    let via_from = IndexSet::from([1, 2, 3, 4]);
    let via_into: IndexSet<_> = [1, 2, 3, 4].into();

    assert_eq!(via_from, via_into);
}
|
||||
|
||||
#[test]
fn iter_default() {
    // A non-Default, non-Clone item type, to prove the iterator `Default`
    // impls don't depend on any bounds of the item type.
    struct Item;
    fn assert_default<T>()
    where
        T: Default + Iterator,
    {
        // A default-constructed iterator must be empty.
        assert!(T::default().next().is_none());
    }
    assert_default::<Iter<'static, Item>>();
    assert_default::<IntoIter<Item>>();
}
|
||||
|
||||
#[test]
|
||||
fn test_binary_search_by() {
|
||||
// adapted from std's test for binary_search
|
||||
let b: IndexSet<i32> = [].into();
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(0));
|
||||
|
||||
let b: IndexSet<i32> = [4].into();
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&3)), Err(0));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&4)), Ok(0));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(1));
|
||||
|
||||
let b: IndexSet<i32> = [1, 2, 4, 6, 8, 9].into();
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(3));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Ok(3));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Err(4));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Ok(4));
|
||||
|
||||
let b: IndexSet<i32> = [1, 2, 4, 5, 6, 8].into();
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&9)), Err(6));
|
||||
|
||||
let b: IndexSet<i32> = [1, 2, 4, 6, 7, 8, 9].into();
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Ok(3));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(3));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Ok(5));
|
||||
|
||||
let b: IndexSet<i32> = [1, 2, 4, 5, 6, 8, 9].into();
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Err(5));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&0)), Err(0));
|
||||
|
||||
let b: IndexSet<i32> = [1, 3, 3, 3, 7].into();
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&0)), Err(0));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&1)), Ok(0));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&2)), Err(1));
|
||||
// diff from std as set merges the duplicate keys
|
||||
assert!(match b.binary_search_by(|x| x.cmp(&3)) {
|
||||
Ok(1..=2) => true,
|
||||
_ => false,
|
||||
});
|
||||
assert!(match b.binary_search_by(|x| x.cmp(&3)) {
|
||||
Ok(1..=2) => true,
|
||||
_ => false,
|
||||
});
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&4)), Err(2));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(2));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Err(2));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Ok(2));
|
||||
assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Err(3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_binary_search_by_key() {
|
||||
// adapted from std's test for binary_search
|
||||
let b: IndexSet<i32> = [].into();
|
||||
assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(0));
|
||||
|
||||
let b: IndexSet<i32> = [4].into();
|
||||
assert_eq!(b.binary_search_by_key(&3, |&x| x), Err(0));
|
||||
assert_eq!(b.binary_search_by_key(&4, |&x| x), Ok(0));
|
||||
assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(1));
|
||||
|
||||
let b: IndexSet<i32> = [1, 2, 4, 6, 8, 9].into();
|
||||
assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(3));
|
||||
assert_eq!(b.binary_search_by_key(&6, |&x| x), Ok(3));
|
||||
assert_eq!(b.binary_search_by_key(&7, |&x| x), Err(4));
|
||||
assert_eq!(b.binary_search_by_key(&8, |&x| x), Ok(4));
|
||||
|
||||
let b: IndexSet<i32> = [1, 2, 4, 5, 6, 8].into();
|
||||
assert_eq!(b.binary_search_by_key(&9, |&x| x), Err(6));
|
||||
|
||||
let b: IndexSet<i32> = [1, 2, 4, 6, 7, 8, 9].into();
|
||||
assert_eq!(b.binary_search_by_key(&6, |&x| x), Ok(3));
|
||||
assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(3));
|
||||
assert_eq!(b.binary_search_by_key(&8, |&x| x), Ok(5));
|
||||
|
||||
let b: IndexSet<i32> = [1, 2, 4, 5, 6, 8, 9].into();
|
||||
assert_eq!(b.binary_search_by_key(&7, |&x| x), Err(5));
|
||||
assert_eq!(b.binary_search_by_key(&0, |&x| x), Err(0));
|
||||
|
||||
let b: IndexSet<i32> = [1, 3, 3, 3, 7].into();
|
||||
assert_eq!(b.binary_search_by_key(&0, |&x| x), Err(0));
|
||||
assert_eq!(b.binary_search_by_key(&1, |&x| x), Ok(0));
|
||||
assert_eq!(b.binary_search_by_key(&2, |&x| x), Err(1));
|
||||
// diff from std as set merges the duplicate keys
|
||||
assert!(match b.binary_search_by_key(&3, |&x| x) {
|
||||
Ok(1..=2) => true,
|
||||
_ => false,
|
||||
});
|
||||
assert!(match b.binary_search_by_key(&3, |&x| x) {
|
||||
Ok(1..=2) => true,
|
||||
_ => false,
|
||||
});
|
||||
assert_eq!(b.binary_search_by_key(&4, |&x| x), Err(2));
|
||||
assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(2));
|
||||
assert_eq!(b.binary_search_by_key(&6, |&x| x), Err(2));
|
||||
assert_eq!(b.binary_search_by_key(&7, |&x| x), Ok(2));
|
||||
assert_eq!(b.binary_search_by_key(&8, |&x| x), Err(3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_partition_point() {
|
||||
// adapted from std's test for partition_point
|
||||
let b: IndexSet<i32> = [].into();
|
||||
assert_eq!(b.partition_point(|&x| x < 5), 0);
|
||||
|
||||
let b: IndexSet<_> = [4].into();
|
||||
assert_eq!(b.partition_point(|&x| x < 3), 0);
|
||||
assert_eq!(b.partition_point(|&x| x < 4), 0);
|
||||
assert_eq!(b.partition_point(|&x| x < 5), 1);
|
||||
|
||||
let b: IndexSet<_> = [1, 2, 4, 6, 8, 9].into();
|
||||
assert_eq!(b.partition_point(|&x| x < 5), 3);
|
||||
assert_eq!(b.partition_point(|&x| x < 6), 3);
|
||||
assert_eq!(b.partition_point(|&x| x < 7), 4);
|
||||
assert_eq!(b.partition_point(|&x| x < 8), 4);
|
||||
|
||||
let b: IndexSet<_> = [1, 2, 4, 5, 6, 8].into();
|
||||
assert_eq!(b.partition_point(|&x| x < 9), 6);
|
||||
|
||||
let b: IndexSet<_> = [1, 2, 4, 6, 7, 8, 9].into();
|
||||
assert_eq!(b.partition_point(|&x| x < 6), 3);
|
||||
assert_eq!(b.partition_point(|&x| x < 5), 3);
|
||||
assert_eq!(b.partition_point(|&x| x < 8), 5);
|
||||
|
||||
let b: IndexSet<_> = [1, 2, 4, 5, 6, 8, 9].into();
|
||||
assert_eq!(b.partition_point(|&x| x < 7), 5);
|
||||
assert_eq!(b.partition_point(|&x| x < 0), 0);
|
||||
|
||||
let b: IndexSet<_> = [1, 3, 3, 3, 7].into();
|
||||
assert_eq!(b.partition_point(|&x| x < 0), 0);
|
||||
assert_eq!(b.partition_point(|&x| x < 1), 0);
|
||||
assert_eq!(b.partition_point(|&x| x < 2), 1);
|
||||
assert_eq!(b.partition_point(|&x| x < 3), 1);
|
||||
assert_eq!(b.partition_point(|&x| x < 4), 2); // diff from std as set merges the duplicate keys
|
||||
assert_eq!(b.partition_point(|&x| x < 5), 2);
|
||||
assert_eq!(b.partition_point(|&x| x < 6), 2);
|
||||
assert_eq!(b.partition_point(|&x| x < 7), 2);
|
||||
assert_eq!(b.partition_point(|&x| x < 8), 3);
|
||||
}
|
||||
53
third-party/vendor/indexmap/src/util.rs
vendored
Normal file
53
third-party/vendor/indexmap/src/util.rs
vendored
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
use core::ops::{Bound, Range, RangeBounds};
|
||||
|
||||
/// Extracts the third element of a 3-tuple, discarding the first two.
pub(crate) fn third<A, B, C>(t: (A, B, C)) -> C {
    let (_, _, c) = t;
    c
}
|
||||
|
||||
/// Resolves any `RangeBounds<usize>` into a concrete `start..end` within
/// `0..=len`.
///
/// ***Panics*** if either bound exceeds `len`, or if the resolved bounds
/// are inverted (`start > end`).
pub(crate) fn simplify_range<R>(range: R, len: usize) -> Range<usize>
where
    R: RangeBounds<usize>,
{
    // Resolve the lower bound to an inclusive start index.
    let start = match range.start_bound() {
        Bound::Included(&i) if i <= len => i,
        Bound::Excluded(&i) if i < len => i + 1,
        Bound::Unbounded => 0,
        bad => panic!("range start {:?} should be <= length {}", bad, len),
    };
    // Resolve the upper bound to an exclusive end index.
    let end = match range.end_bound() {
        Bound::Included(&i) if i < len => i + 1,
        Bound::Excluded(&i) if i <= len => i,
        Bound::Unbounded => len,
        bad => panic!("range end {:?} should be <= length {}", bad, len),
    };
    assert!(
        start <= end,
        "range start {:?} should be <= range end {:?}",
        range.start_bound(),
        range.end_bound()
    );
    start..end
}
|
||||
|
||||
/// Like `simplify_range`, but returns `None` instead of panicking when a
/// bound exceeds `len` or the resolved bounds are inverted.
pub(crate) fn try_simplify_range<R>(range: R, len: usize) -> Option<Range<usize>>
where
    R: RangeBounds<usize>,
{
    // Resolve the lower bound to an inclusive start index.
    let start = match range.start_bound() {
        Bound::Included(&i) if i <= len => i,
        Bound::Excluded(&i) if i < len => i + 1,
        Bound::Unbounded => 0,
        _ => return None,
    };
    // Resolve the upper bound to an exclusive end index.
    let end = match range.end_bound() {
        Bound::Included(&i) if i < len => i + 1,
        Bound::Excluded(&i) if i <= len => i,
        Bound::Unbounded => len,
        _ => return None,
    };
    // Inverted bounds are rejected rather than clamped to an empty range.
    if start <= end {
        Some(start..end)
    } else {
        None
    }
}
|
||||
53
third-party/vendor/indexmap/tests/equivalent_trait.rs
vendored
Normal file
53
third-party/vendor/indexmap/tests/equivalent_trait.rs
vendored
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
use indexmap::indexmap;
|
||||
use indexmap::Equivalent;
|
||||
|
||||
use std::hash::Hash;
|
||||
|
||||
// A 2-tuple key wrapper used to exercise lookups through the `Equivalent`
// trait: a map keyed by `(A, B)` can be queried with a borrowed
// `Pair<&str, &str>` without constructing owned keys.
#[derive(Debug, Hash)]
pub struct Pair<A, B>(pub A, pub B);

// Field-wise comparison against a plain tuple; each field only needs to be
// comparable with the corresponding tuple element.
impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
where
    C: PartialEq<A>,
    D: PartialEq<B>,
{
    fn eq(&self, rhs: &(A, B)) -> bool {
        self.0 == rhs.0 && self.1 == rhs.1
    }
}

// Any `Pair` that can be compared with a key type `X` is usable as a lookup
// key for maps keyed by `X`; equivalence delegates to the `PartialEq` above.
impl<A, B, X> Equivalent<X> for Pair<A, B>
where
    Pair<A, B>: PartialEq<X>,
    A: Hash + Eq,
    B: Hash + Eq,
{
    fn equivalent(&self, other: &X) -> bool {
        *self == *other
    }
}
|
||||
|
||||
#[test]
fn test_lookup() {
    // Composite `(String, String)` keys can be looked up with a borrowed
    // `Pair<&str, &str>` — no owned key needs to be built.
    let s = String::from;
    let map = indexmap! {
        (s("a"), s("b")) => 1,
        (s("a"), s("x")) => 2,
    };

    assert!(map.contains_key(&Pair("a", "b")));
    assert!(!map.contains_key(&Pair("b", "a")));
}

#[test]
fn test_string_str() {
    // `String` keys accept plain `&str` lookups and removals via the
    // built-in borrow-based `Equivalent` impl.
    let s = String::from;
    let mut map = indexmap! {
        s("a") => 1, s("b") => 2,
        s("x") => 3, s("y") => 4,
    };

    assert!(map.contains_key("a"));
    assert!(!map.contains_key("z"));
    assert_eq!(map.swap_remove("b"), Some(2));
}
|
||||
19
third-party/vendor/indexmap/tests/macros_full_path.rs
vendored
Normal file
19
third-party/vendor/indexmap/tests/macros_full_path.rs
vendored
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
// These tests deliberately avoid any `use indexmap::...` imports, verifying
// that the exported macros expand correctly when invoked by full path
// (i.e. they use `$crate` internally rather than assuming local imports).
#[test]
fn test_create_map() {
    let _m = indexmap::indexmap! {
        1 => 2,
        7 => 1,
        2 => 2,
        3 => 3,
    };
}

#[test]
fn test_create_set() {
    let _s = indexmap::indexset! {
        1,
        7,
        2,
        3,
    };
}
|
||||
759
third-party/vendor/indexmap/tests/quick.rs
vendored
Normal file
759
third-party/vendor/indexmap/tests/quick.rs
vendored
Normal file
|
|
@ -0,0 +1,759 @@
|
|||
use indexmap::{IndexMap, IndexSet};
|
||||
use itertools::Itertools;
|
||||
|
||||
use quickcheck::Arbitrary;
|
||||
use quickcheck::Gen;
|
||||
use quickcheck::QuickCheck;
|
||||
use quickcheck::TestResult;
|
||||
|
||||
use fnv::FnvHasher;
|
||||
use std::hash::{BuildHasher, BuildHasherDefault};
|
||||
type FnvBuilder = BuildHasherDefault<FnvHasher>;
|
||||
type IndexMapFnv<K, V> = IndexMap<K, V, FnvBuilder>;
|
||||
|
||||
use std::cmp::min;
|
||||
use std::collections::HashMap;
|
||||
use std::collections::HashSet;
|
||||
use std::fmt::Debug;
|
||||
use std::hash::Hash;
|
||||
use std::ops::Bound;
|
||||
use std::ops::Deref;
|
||||
|
||||
use indexmap::map::Entry;
|
||||
use std::collections::hash_map::Entry as StdEntry;
|
||||
|
||||
/// Collects the borrowed items of `iter` into a `HashSet` by copy.
fn set<'a, T: 'a, I>(iter: I) -> HashSet<T>
where
    I: IntoIterator<Item = &'a T>,
    T: Copy + Hash + Eq,
{
    iter.into_iter().copied().collect()
}

/// Collects the borrowed items of `iter` into an `IndexMap` with unit
/// values, keeping first-seen insertion order (duplicates are merged).
fn indexmap<'a, T: 'a, I>(iter: I) -> IndexMap<T, ()>
where
    I: IntoIterator<Item = &'a T>,
    T: Copy + Hash + Eq,
{
    IndexMap::from_iter(iter.into_iter().copied().map(|k| (k, ())))
}
|
||||
|
||||
// Helper macro to allow us to use smaller quickcheck limits under miri.
//
// Wraps each `fn name(args) -> ret { body }` item into a `#[test]` that
// forwards to `quickcheck::quickcheck!`, but first shrinks the generator
// size and case counts when compiled for miri.
macro_rules! quickcheck_limit {
    (@as_items $($i:item)*) => ($($i)*);
    {
        $(
            $(#[$m:meta])*
            fn $fn_name:ident($($arg_name:ident : $arg_ty:ty),*) -> $ret:ty {
                $($code:tt)*
            }
        )*
    } => (
        quickcheck::quickcheck! {
            @as_items
            $(
                #[test]
                $(#[$m])*
                fn $fn_name() {
                    // The property under test, reconstructed from the macro input.
                    fn prop($($arg_name: $arg_ty),*) -> $ret {
                        $($code)*
                    }
                    // Miri interprets every execution, so shrink the input
                    // generator and the number of cases to keep runtime sane.
                    let mut quickcheck = QuickCheck::new();
                    if cfg!(miri) {
                        quickcheck = quickcheck
                            .gen(Gen::new(10))
                            .tests(10)
                            .max_tests(100);
                    }

                    quickcheck.quickcheck(prop as fn($($arg_ty),*) -> $ret);
                }
            )*
        }
    )
}
|
||||
|
||||
// Core quickcheck properties for `IndexMap`/`IndexSet`: insertion, removal,
// ordering, draining, indexing, and index-manipulation APIs. Each `fn` below
// is expanded by `quickcheck_limit!` into a `#[test]` driven by quickcheck.
quickcheck_limit! {
    // Every inserted key must be retrievable afterwards.
    fn contains(insert: Vec<u32>) -> bool {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }
        insert.iter().all(|&key| map.get(&key).is_some())
    }

    // Keys never inserted must not be found.
    fn contains_not(insert: Vec<u8>, not: Vec<u8>) -> bool {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }
        let nots = &set(&not) - &set(&insert);
        nots.iter().all(|&key| map.get(&key).is_none())
    }

    // After inserts and swap-removals, the map's contents must equal the
    // set difference of the two inputs.
    fn insert_remove(insert: Vec<u8>, remove: Vec<u8>) -> bool {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }
        for &key in &remove {
            map.swap_remove(&key);
        }
        let elements = &set(&insert) - &set(&remove);
        map.len() == elements.len() && map.iter().count() == elements.len() &&
            elements.iter().all(|k| map.get(k).is_some())
    }

    // Iteration order must match first-insertion order of unique keys.
    fn insertion_order(insert: Vec<u32>) -> bool {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }
        itertools::assert_equal(insert.iter().unique(), map.keys());
        true
    }

    // `insert_sorted` (both the map method and the vacant-entry method)
    // must keep the map sorted by key, matching a sorted `HashMap` view.
    fn insert_sorted(insert: Vec<(u32, u32)>) -> bool {
        let mut hmap = HashMap::new();
        let mut map = IndexMap::new();
        let mut map2 = IndexMap::new();
        for &(key, value) in &insert {
            hmap.insert(key, value);
            map.insert_sorted(key, value);
            match map2.entry(key) {
                Entry::Occupied(e) => *e.into_mut() = value,
                Entry::Vacant(e) => { e.insert_sorted(value); }
            }
        }
        itertools::assert_equal(hmap.iter().sorted(), &map);
        itertools::assert_equal(&map, &map2);
        true
    }

    // Repeated `pop` must yield entries in reverse insertion order.
    fn pop(insert: Vec<u8>) -> bool {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }
        let mut pops = Vec::new();
        while let Some((key, _v)) = map.pop() {
            pops.push(key);
        }
        pops.reverse();

        itertools::assert_equal(insert.iter().unique(), &pops);
        true
    }

    // `with_capacity(cap)` must provide at least `cap` capacity.
    fn with_cap(template: Vec<()>) -> bool {
        let cap = template.len();
        let map: IndexMap<u8, u8> = IndexMap::with_capacity(cap);
        println!("wish: {}, got: {} (diff: {})", cap, map.capacity(), map.capacity() as isize - cap as isize);
        map.capacity() >= cap
    }

    // Draining the full range must yield every entry exactly once.
    fn drain_full(insert: Vec<u8>) -> bool {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }
        let mut clone = map.clone();
        let drained = clone.drain(..);
        for (key, _) in drained {
            map.swap_remove(&key);
        }
        map.is_empty()
    }

    // `IndexMap::drain` must accept/reject arbitrary bounds exactly like
    // `Vec::drain`, and agree on which elements are removed.
    fn drain_bounds(insert: Vec<u8>, range: (Bound<usize>, Bound<usize>)) -> TestResult {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }

        // First see if `Vec::drain` is happy with this range.
        let result = std::panic::catch_unwind(|| {
            let mut keys: Vec<u8> = map.keys().copied().collect();
            keys.drain(range);
            keys
        });

        if let Ok(keys) = result {
            map.drain(range);
            // Check that our `drain` matches the same key order.
            assert!(map.keys().eq(&keys));
            // Check that hash lookups all work too.
            assert!(keys.iter().all(|key| map.contains_key(key)));
            TestResult::passed()
        } else {
            // If `Vec::drain` panicked, so should we.
            TestResult::must_fail(move || { map.drain(range); })
        }
    }

    // `shift_remove` must preserve the relative order of remaining keys.
    fn shift_remove(insert: Vec<u8>, remove: Vec<u8>) -> bool {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }
        for &key in &remove {
            map.shift_remove(&key);
        }
        let elements = &set(&insert) - &set(&remove);

        // Check that order is preserved after removals
        let mut iter = map.keys();
        for &key in insert.iter().unique() {
            if elements.contains(&key) {
                assert_eq!(Some(&key), iter.next());
            }
        }

        map.len() == elements.len() && map.iter().count() == elements.len() &&
            elements.iter().all(|k| map.get(k).is_some())
    }

    // Positional access (`get_index`, `Index<usize>`) must agree with key
    // lookup, including through mutation by index.
    fn indexing(insert: Vec<u8>) -> bool {
        let mut map: IndexMap<_, _> = insert.into_iter().map(|x| (x, x)).collect();
        let set: IndexSet<_> = map.keys().copied().collect();
        assert_eq!(map.len(), set.len());

        for (i, &key) in set.iter().enumerate() {
            assert_eq!(map.get_index(i), Some((&key, &key)));
            assert_eq!(set.get_index(i), Some(&key));
            assert_eq!(map[i], key);
            assert_eq!(set[i], key);

            // Mutate via both mutable-index APIs: net effect clears bit 0.
            *map.get_index_mut(i).unwrap().1 >>= 1;
            map[i] <<= 1;
        }

        set.iter().enumerate().all(|(i, &key)| {
            let value = key & !1;
            map[&key] == value && map[i] == value
        })
    }

    // Use `u8` test indices so quickcheck is less likely to go out of bounds.
    fn set_swap_indices(vec: Vec<u8>, a: u8, b: u8) -> TestResult {
        let mut set = IndexSet::<u8>::from_iter(vec);
        let a = usize::from(a);
        let b = usize::from(b);

        if a >= set.len() || b >= set.len() {
            return TestResult::discard();
        }

        // Model the swap on a plain Vec, then compare.
        let mut vec = Vec::from_iter(set.iter().cloned());
        vec.swap(a, b);

        set.swap_indices(a, b);

        // Check both iteration order and hash lookups
        assert!(set.iter().eq(vec.iter()));
        assert!(vec.iter().enumerate().all(|(i, x)| {
            set.get_index_of(x) == Some(i)
        }));
        TestResult::passed()
    }

    // `swap_indices` via the plain map method.
    fn map_swap_indices(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        test_map_swap_indices(vec, from, to, IndexMap::swap_indices)
    }

    // `swap_indices` via the occupied-entry API.
    fn occupied_entry_swap_indices(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        test_map_swap_indices(vec, from, to, |map, from, to| {
            let key = map.keys()[from];
            match map.entry(key) {
                Entry::Occupied(entry) => entry.swap_indices(to),
                _ => unreachable!(),
            }
        })
    }

    // `swap_indices` via the indexed-entry API.
    fn indexed_entry_swap_indices(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        test_map_swap_indices(vec, from, to, |map, from, to| {
            map.get_index_entry(from).unwrap().swap_indices(to);
        })
    }

    // `swap_indices` via the raw-entry (v1) API.
    fn raw_occupied_entry_swap_indices(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut};
        test_map_swap_indices(vec, from, to, |map, from, to| {
            let key = map.keys()[from];
            match map.raw_entry_mut_v1().from_key(&key) {
                RawEntryMut::Occupied(entry) => entry.swap_indices(to),
                _ => unreachable!(),
            }
        })
    }

    // Use `u8` test indices so quickcheck is less likely to go out of bounds.
    fn set_move_index(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        let mut set = IndexSet::<u8>::from_iter(vec);
        let from = usize::from(from);
        let to = usize::from(to);

        if from >= set.len() || to >= set.len() {
            return TestResult::discard();
        }

        // Model the move as Vec remove + insert, then compare.
        let mut vec = Vec::from_iter(set.iter().cloned());
        let x = vec.remove(from);
        vec.insert(to, x);

        set.move_index(from, to);

        // Check both iteration order and hash lookups
        assert!(set.iter().eq(vec.iter()));
        assert!(vec.iter().enumerate().all(|(i, x)| {
            set.get_index_of(x) == Some(i)
        }));
        TestResult::passed()
    }

    // `move_index` via the plain map method.
    fn map_move_index(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        test_map_move_index(vec, from, to, IndexMap::move_index)
    }

    // `move_index` via the occupied-entry API.
    fn occupied_entry_move_index(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        test_map_move_index(vec, from, to, |map, from, to| {
            let key = map.keys()[from];
            match map.entry(key) {
                Entry::Occupied(entry) => entry.move_index(to),
                _ => unreachable!(),
            }
        })
    }

    // `move_index` via the indexed-entry API.
    fn indexed_entry_move_index(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        test_map_move_index(vec, from, to, |map, from, to| {
            map.get_index_entry(from).unwrap().move_index(to);
        })
    }

    // `move_index` via the raw-entry (v1) API.
    fn raw_occupied_entry_move_index(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
        use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut};
        test_map_move_index(vec, from, to, |map, from, to| {
            let key = map.keys()[from];
            match map.raw_entry_mut_v1().from_key(&key) {
                RawEntryMut::Occupied(entry) => entry.move_index(to),
                _ => unreachable!(),
            }
        })
    }

    // `shift_insert` via the vacant-entry API.
    fn occupied_entry_shift_insert(vec: Vec<u8>, i: u8) -> TestResult {
        test_map_shift_insert(vec, i, |map, i, key| {
            match map.entry(key) {
                Entry::Vacant(entry) => entry.shift_insert(i, ()),
                _ => unreachable!(),
            };
        })
    }

    // `shift_insert` via the raw-entry (v1) vacant API.
    fn raw_occupied_entry_shift_insert(vec: Vec<u8>, i: u8) -> TestResult {
        use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut};
        test_map_shift_insert(vec, i, |map, i, key| {
            match map.raw_entry_mut_v1().from_key(&key) {
                RawEntryMut::Vacant(entry) => entry.shift_insert(i, key, ()),
                _ => unreachable!(),
            };
        })
    }
}
|
||||
|
||||
fn test_map_swap_indices<F>(vec: Vec<u8>, a: u8, b: u8, swap_indices: F) -> TestResult
|
||||
where
|
||||
F: FnOnce(&mut IndexMap<u8, ()>, usize, usize),
|
||||
{
|
||||
let mut map = IndexMap::<u8, ()>::from_iter(vec.into_iter().map(|k| (k, ())));
|
||||
let a = usize::from(a);
|
||||
let b = usize::from(b);
|
||||
|
||||
if a >= map.len() || b >= map.len() {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let mut vec = Vec::from_iter(map.keys().copied());
|
||||
vec.swap(a, b);
|
||||
|
||||
swap_indices(&mut map, a, b);
|
||||
|
||||
// Check both iteration order and hash lookups
|
||||
assert!(map.keys().eq(vec.iter()));
|
||||
assert!(vec
|
||||
.iter()
|
||||
.enumerate()
|
||||
.all(|(i, x)| { map.get_index_of(x) == Some(i) }));
|
||||
TestResult::passed()
|
||||
}
|
||||
|
||||
fn test_map_move_index<F>(vec: Vec<u8>, from: u8, to: u8, move_index: F) -> TestResult
|
||||
where
|
||||
F: FnOnce(&mut IndexMap<u8, ()>, usize, usize),
|
||||
{
|
||||
let mut map = IndexMap::<u8, ()>::from_iter(vec.into_iter().map(|k| (k, ())));
|
||||
let from = usize::from(from);
|
||||
let to = usize::from(to);
|
||||
|
||||
if from >= map.len() || to >= map.len() {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let mut vec = Vec::from_iter(map.keys().copied());
|
||||
let x = vec.remove(from);
|
||||
vec.insert(to, x);
|
||||
|
||||
move_index(&mut map, from, to);
|
||||
|
||||
// Check both iteration order and hash lookups
|
||||
assert!(map.keys().eq(vec.iter()));
|
||||
assert!(vec
|
||||
.iter()
|
||||
.enumerate()
|
||||
.all(|(i, x)| { map.get_index_of(x) == Some(i) }));
|
||||
TestResult::passed()
|
||||
}
|
||||
|
||||
fn test_map_shift_insert<F>(vec: Vec<u8>, i: u8, shift_insert: F) -> TestResult
|
||||
where
|
||||
F: FnOnce(&mut IndexMap<u8, ()>, usize, u8),
|
||||
{
|
||||
let mut map = IndexMap::<u8, ()>::from_iter(vec.into_iter().map(|k| (k, ())));
|
||||
let i = usize::from(i);
|
||||
if i >= map.len() {
|
||||
return TestResult::discard();
|
||||
}
|
||||
|
||||
let mut vec = Vec::from_iter(map.keys().copied());
|
||||
let x = vec.pop().unwrap();
|
||||
vec.insert(i, x);
|
||||
|
||||
let (last, ()) = map.pop().unwrap();
|
||||
assert_eq!(x, last);
|
||||
map.shrink_to_fit(); // so we might have to grow and rehash the table
|
||||
|
||||
shift_insert(&mut map, i, last);
|
||||
|
||||
// Check both iteration order and hash lookups
|
||||
assert!(map.keys().eq(vec.iter()));
|
||||
assert!(vec
|
||||
.iter()
|
||||
.enumerate()
|
||||
.all(|(i, x)| { map.get_index_of(x) == Some(i) }));
|
||||
TestResult::passed()
|
||||
}
|
||||
|
||||
use crate::Op::*;
/// One randomly generated map operation. Sequences of these are replayed
/// against both an `IndexMap` and a std `HashMap` (see `do_ops`) to check
/// the two stay observationally equivalent.
#[derive(Copy, Clone, Debug)]
enum Op<K, V> {
    // Insert via `insert`.
    Add(K, V),
    // Remove via `swap_remove` / `HashMap::remove`.
    Remove(K),
    // Insert via the entry API (`or_insert_with`).
    AddEntry(K, V),
    // Remove via the entry API.
    RemoveEntry(K),
}
|
||||
|
||||
impl<K, V> Arbitrary for Op<K, V>
|
||||
where
|
||||
K: Arbitrary,
|
||||
V: Arbitrary,
|
||||
{
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
match u32::arbitrary(g) % 4 {
|
||||
0 => Add(K::arbitrary(g), V::arbitrary(g)),
|
||||
1 => AddEntry(K::arbitrary(g), V::arbitrary(g)),
|
||||
2 => Remove(K::arbitrary(g)),
|
||||
_ => RemoveEntry(K::arbitrary(g)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn do_ops<K, V, S>(ops: &[Op<K, V>], a: &mut IndexMap<K, V, S>, b: &mut HashMap<K, V>)
|
||||
where
|
||||
K: Hash + Eq + Clone,
|
||||
V: Clone,
|
||||
S: BuildHasher,
|
||||
{
|
||||
for op in ops {
|
||||
match *op {
|
||||
Add(ref k, ref v) => {
|
||||
a.insert(k.clone(), v.clone());
|
||||
b.insert(k.clone(), v.clone());
|
||||
}
|
||||
AddEntry(ref k, ref v) => {
|
||||
a.entry(k.clone()).or_insert_with(|| v.clone());
|
||||
b.entry(k.clone()).or_insert_with(|| v.clone());
|
||||
}
|
||||
Remove(ref k) => {
|
||||
a.swap_remove(k);
|
||||
b.remove(k);
|
||||
}
|
||||
RemoveEntry(ref k) => {
|
||||
if let Entry::Occupied(ent) = a.entry(k.clone()) {
|
||||
ent.swap_remove_entry();
|
||||
}
|
||||
if let StdEntry::Occupied(ent) = b.entry(k.clone()) {
|
||||
ent.remove_entry();
|
||||
}
|
||||
}
|
||||
}
|
||||
//println!("{:?}", a);
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_maps_equivalent<K, V>(a: &IndexMap<K, V>, b: &HashMap<K, V>) -> bool
|
||||
where
|
||||
K: Hash + Eq + Debug,
|
||||
V: Eq + Debug,
|
||||
{
|
||||
assert_eq!(a.len(), b.len());
|
||||
assert_eq!(a.iter().next().is_some(), b.iter().next().is_some());
|
||||
for key in a.keys() {
|
||||
assert!(b.contains_key(key), "b does not contain {:?}", key);
|
||||
}
|
||||
for key in b.keys() {
|
||||
assert!(a.get(key).is_some(), "a does not contain {:?}", key);
|
||||
}
|
||||
for key in a.keys() {
|
||||
assert_eq!(a[key], b[key]);
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
// Differential quickcheck properties: random op sequences against a std
// `HashMap` reference, plus retain/sort/reverse ordering checks. Each `fn`
// below is expanded by `quickcheck_limit!` into a `#[test]`.
quickcheck_limit! {
    // Random op sequences on i8 keys must leave IndexMap == HashMap.
    fn operations_i8(ops: Large<Vec<Op<i8, i8>>>) -> bool {
        let mut map = IndexMap::new();
        let mut reference = HashMap::new();
        do_ops(&ops, &mut map, &mut reference);
        assert_maps_equivalent(&map, &reference)
    }

    // Same differential check with string-like (`Alpha`) keys.
    fn operations_string(ops: Vec<Op<Alpha, i8>>) -> bool {
        let mut map = IndexMap::new();
        let mut reference = HashMap::new();
        do_ops(&ops, &mut map, &mut reference);
        assert_maps_equivalent(&map, &reference)
    }

    // `keys()` and `values()` must pair up consistently and cover every
    // entry exactly once.
    fn keys_values(ops: Large<Vec<Op<i8, i8>>>) -> bool {
        let mut map = IndexMap::new();
        let mut reference = HashMap::new();
        do_ops(&ops, &mut map, &mut reference);
        let mut visit = IndexMap::new();
        for (k, v) in map.keys().zip(map.values()) {
            assert_eq!(&map[k], v);
            assert!(!visit.contains_key(k));
            visit.insert(*k, *v);
        }
        assert_eq!(visit.len(), reference.len());
        true
    }

    // Same pairing check through `values_mut()`.
    fn keys_values_mut(ops: Large<Vec<Op<i8, i8>>>) -> bool {
        let mut map = IndexMap::new();
        let mut reference = HashMap::new();
        do_ops(&ops, &mut map, &mut reference);
        let mut visit = IndexMap::new();
        let keys = Vec::from_iter(map.keys().copied());
        for (k, v) in keys.iter().zip(map.values_mut()) {
            assert_eq!(&reference[k], v);
            assert!(!visit.contains_key(k));
            visit.insert(*k, *v);
        }
        assert_eq!(visit.len(), reference.len());
        true
    }

    // Map equality (even across hashers) must agree with HashMap equality
    // when the op sequence is perturbed by random deletions.
    fn equality(ops1: Vec<Op<i8, i8>>, removes: Vec<usize>) -> bool {
        let mut map = IndexMap::new();
        let mut reference = HashMap::new();
        do_ops(&ops1, &mut map, &mut reference);
        let mut ops2 = ops1.clone();
        for &r in &removes {
            if !ops2.is_empty() {
                let i = r % ops2.len();
                ops2.remove(i);
            }
        }
        // Different hasher (Fnv) on purpose: Eq must not depend on hasher.
        let mut map2 = IndexMapFnv::default();
        let mut reference2 = HashMap::new();
        do_ops(&ops2, &mut map2, &mut reference2);
        assert_eq!(map == map2, reference == reference2);
        true
    }

    // `retain` must keep exactly the non-removed keys, in original order.
    fn retain_ordered(keys: Large<Vec<i8>>, remove: Large<Vec<i8>>) -> () {
        let mut map = indexmap(keys.iter());
        let initial_map = map.clone(); // deduplicated in-order input
        let remove_map = indexmap(remove.iter());
        let keys_s = set(keys.iter());
        let remove_s = set(remove.iter());
        let answer = &keys_s - &remove_s;
        map.retain(|k, _| !remove_map.contains_key(k));

        // check the values
        assert_eq!(map.len(), answer.len());
        for key in &answer {
            assert!(map.contains_key(key));
        }
        // check the order
        itertools::assert_equal(map.keys(), initial_map.keys().filter(|&k| !remove_map.contains_key(k)));
    }

    // `sort_by` on keys must yield the key-sorted, last-value-wins list.
    fn sort_1(keyvals: Large<Vec<(i8, i8)>>) -> () {
        let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
        let mut answer = keyvals.0;
        answer.sort_by_key(|t| t.0);

        // reverse dedup: Because IndexMap::from_iter keeps the last value for
        // identical keys
        answer.reverse();
        answer.dedup_by_key(|t| t.0);
        answer.reverse();

        map.sort_by(|k1, _, k2, _| Ord::cmp(k1, k2));

        // check it contains all the values it should
        for &(key, val) in &answer {
            assert_eq!(map[&key], val);
        }

        // check the order

        let mapv = Vec::from_iter(map);
        assert_eq!(answer, mapv);

    }

    // `sort_by` on values must leave the map value-sorted.
    fn sort_2(keyvals: Large<Vec<(i8, i8)>>) -> () {
        let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
        map.sort_by(|_, v1, _, v2| Ord::cmp(v1, v2));
        assert_sorted_by_key(map, |t| t.1);
    }

    // `sort_by_cached_key` must sort by the cached (reversed) key.
    fn sort_3(keyvals: Large<Vec<(i8, i8)>>) -> () {
        let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
        map.sort_by_cached_key(|&k, _| std::cmp::Reverse(k));
        assert_sorted_by_key(map, |t| std::cmp::Reverse(t.0));
    }

    // `reverse` must produce the exact reversal of the deduplicated,
    // last-value-wins entry list.
    fn reverse(keyvals: Large<Vec<(i8, i8)>>) -> () {
        let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());

        fn generate_answer(input: &Vec<(i8, i8)>) -> Vec<(i8, i8)> {
            // to mimic what `IndexMap::from_iter` does:
            // need to get (A) the unique keys in forward order, and (B) the
            // last value of each of those keys.

            // create (A): an iterable that yields the unique keys in ltr order
            let mut seen_keys = HashSet::new();
            let unique_keys_forward = input.iter().filter_map(move |(k, _)| {
                if seen_keys.contains(k) { None }
                else { seen_keys.insert(*k); Some(*k) }
            });

            // create (B): a mapping of keys to the last value seen for that key
            // this is the same as reversing the input and taking the first
            // value seen for that key!
            let mut last_val_per_key = HashMap::new();
            for &(k, v) in input.iter().rev() {
                if !last_val_per_key.contains_key(&k) {
                    last_val_per_key.insert(k, v);
                }
            }

            // iterate over the keys in (A) in order, and match each one with
            // the corresponding last value from (B)
            let mut ans: Vec<_> = unique_keys_forward
                .map(|k| (k, *last_val_per_key.get(&k).unwrap()))
                .collect();

            // finally, since this test is testing `.reverse()`, reverse the
            // answer in-place
            ans.reverse();

            ans
        }

        let answer = generate_answer(&keyvals.0);

        // perform the work
        map.reverse();

        // check it contains all the values it should
        for &(key, val) in &answer {
            assert_eq!(map[&key], val);
        }

        // check the order
        let mapv = Vec::from_iter(map);
        assert_eq!(answer, mapv);
    }
}
|
||||
|
||||
/// Assert that `iterable` already yields its items sorted by `key`.
///
/// Collects the items and compares them to a copy sorted with
/// `sort_by_key`; since that sort is stable, equal-key runs must also
/// appear in their original relative order. Panics on any mismatch.
fn assert_sorted_by_key<I, Key, X>(iterable: I, key: Key)
where
    I: IntoIterator,
    I::Item: Ord + Clone + Debug,
    Key: Fn(&I::Item) -> X,
    X: Ord,
{
    let actual: Vec<I::Item> = iterable.into_iter().collect();
    let mut expected = actual.clone();
    expected.sort_by_key(key);
    assert_eq!(actual, expected);
}
|
||||
|
||||
/// Newtype over `String` used as a hashable, shrinkable test key; its
/// `Arbitrary` impl below generates short lowercase ASCII strings.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct Alpha(String);
|
||||
|
||||
// Deref to the inner `String` so `Alpha` can reuse `String` methods
// (notably `shrink` in its `Arbitrary` impl).
impl Deref for Alpha {
    type Target = String;
    fn deref(&self) -> &String {
        &self.0
    }
}
|
||||
|
||||
// Character pool that `Alpha::arbitrary` draws from.
const ALPHABET: &[u8] = b"abcdefghijklmnopqrstuvwxyz";
|
||||
|
||||
impl Arbitrary for Alpha {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
let len = usize::arbitrary(g) % g.size();
|
||||
let len = min(len, 16);
|
||||
Alpha(
|
||||
(0..len)
|
||||
.map(|_| ALPHABET[usize::arbitrary(g) % ALPHABET.len()] as char)
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
|
||||
Box::new((**self).shrink().map(Alpha))
|
||||
}
|
||||
}
|
||||
|
||||
/// quickcheck Arbitrary adaptor -- make a larger vec
// Wrapping a `Vec<T>` in `Large` selects the `Arbitrary` impl below,
// which generates up to 10x the usual quickcheck size.
#[derive(Clone, Debug)]
struct Large<T>(T);
|
||||
|
||||
// Deref to the wrapped value so tests can use `Large<Vec<T>>` as the
// collection directly (e.g. `keys.iter()`, `keyvals.to_vec()`).
impl<T> Deref for Large<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}
|
||||
|
||||
impl<T> Arbitrary for Large<Vec<T>>
|
||||
where
|
||||
T: Arbitrary,
|
||||
{
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
let len = usize::arbitrary(g) % (g.size() * 10);
|
||||
Large((0..len).map(|_| T::arbitrary(g)).collect())
|
||||
}
|
||||
|
||||
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
|
||||
Box::new((**self).shrink().map(Large))
|
||||
}
|
||||
}
|
||||
28
third-party/vendor/indexmap/tests/tests.rs
vendored
Normal file
28
third-party/vendor/indexmap/tests/tests.rs
vendored
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
use indexmap::{indexmap, indexset};
|
||||
|
||||
/// `IndexMap::sorted_by` must yield pairs ordered by the comparator
/// (here: by value), keeping insertion order among equal values.
#[test]
fn test_sort() {
    let map = indexmap! {
        1 => 2,
        7 => 1,
        2 => 2,
        3 => 3,
    };

    // Values 2 and 2 tie, so (1, 2) stays ahead of (2, 2).
    let expected = vec![(7, 1), (1, 2), (2, 2), (3, 3)];
    itertools::assert_equal(map.sorted_by(|_, a, _, b| Ord::cmp(a, b)), expected);
}
|
||||
|
||||
/// `IndexSet::sorted_by` must yield values ordered by the comparator.
#[test]
fn test_sort_set() {
    let set = indexset! {
        1,
        7,
        2,
        3,
    };

    let expected = vec![1, 2, 3, 7];
    itertools::assert_equal(set.sorted_by(|a, b| Ord::cmp(a, b)), expected);
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue