[fine] Remove salsa dependency

This commit is contained in:
John Doty 2024-04-07 07:00:02 -07:00
parent 228ca719f0
commit a2b3e8b74d
231 changed files with 0 additions and 41310 deletions

109
Cargo.lock generated
View file

@ -109,12 +109,6 @@ version = "1.0.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1"
[[package]]
name = "arc-swap"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f"
[[package]]
name = "arrayref"
version = "0.3.7"
@ -442,19 +436,6 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "crossbeam"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-epoch",
"crossbeam-queue",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.12"
@ -464,34 +445,6 @@ dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-queue"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.19"
@ -689,16 +642,6 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "eyre"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec"
dependencies = [
"indenter",
"once_cell",
]
[[package]]
name = "fdeflate"
version = "0.3.4"
@ -729,7 +672,6 @@ dependencies = [
"prettyplease",
"proc-macro2",
"quote",
"salsa-2022",
"syn 2.0.52",
"thiserror",
"unicode-width",
@ -962,15 +904,6 @@ dependencies = [
"allocator-api2",
]
[[package]]
name = "hashlink"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
dependencies = [
"hashbrown 0.14.3",
]
[[package]]
name = "hassle-rs"
version = "0.10.0"
@ -986,12 +919,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hermit-abi"
version = "0.3.9"
@ -1048,12 +975,6 @@ dependencies = [
"png",
]
[[package]]
name = "indenter"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683"
[[package]]
name = "indexmap"
version = "1.9.3"
@ -2064,36 +1985,6 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "salsa-2022"
version = "0.1.0"
source = "git+https://github.com/salsa-rs/salsa.git#f1d318a2795e87081e316a5f31ef02f0cd6a1c83"
dependencies = [
"arc-swap",
"crossbeam",
"crossbeam-utils",
"dashmap",
"hashlink",
"indexmap 2.2.5",
"log",
"parking_lot",
"rustc-hash",
"salsa-2022-macros",
"smallvec",
]
[[package]]
name = "salsa-2022-macros"
version = "0.1.0"
source = "git+https://github.com/salsa-rs/salsa.git#f1d318a2795e87081e316a5f31ef02f0cd6a1c83"
dependencies = [
"eyre",
"heck",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "same-file"
version = "1.0.6"

View file

@ -16,4 +16,3 @@ syn = "2.0.47"
[dependencies]
thiserror = "1.0.56"
unicode-width = "=0.1.11"
salsa = { git = "https://github.com/salsa-rs/salsa.git", package = "salsa-2022" }

View file

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"c63f740f694a274796862123637146dcc5aceb999eb157e76ec3d6333ed5b7f1","Cargo.toml":"5cb99f81b29e58171a4e3c44440fe25a0a79e09dc8c569ff4a3bd29dab55f322","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff3f1cd12af8866d9bde961d6cc40df774cd131d484de31d3170c4b02b21a7b5","README.md":"51b800d6d0fe42f855dfbd3f8a0e401a8bb276ca52dcec79e2788e64122c484d","TODO":"788f7b1ad8fea31a5ec1b1079f43a23913e615247a92acbd37c0466027c233fe","benches/background.rs":"5f08673a4b5e7935a8ceff2893c88a2de355b5925333b5949ff5f4bc44bcb22e","benches/int-access.rs":"ca730792fd171ac10bcb968af2d87286c1e469b8f2c6abccd2c6d42c72d315eb","benches/track.rs":"785ad6ffc0152b117562312404f75df97ea169e4b07fa8ec9a9595cd30b3fee4","ci-check.sh":"f1ad7ffbb2e996c50633a9168cdd45760af03c4bb9aaf2a4d60b45f41f9413a8","rustfmt.toml":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","src/access.rs":"284d0d54f45570ccb5d2197c2d5bffe502e88d4c231e601ab70a1f3772ac009c","src/as_raw.rs":"be4cee8ef22ba5125af05d50b93f992531faea17991384f3690d2c481110f087","src/cache.rs":"25b753fbb53c4addaeccae25ee12ad7b77d70ade1af73ca02846797b541872be","src/compile_fail_tests.rs":"4f68cecb0406b0062e01b86879643e26804ae96a04ba0ca6f63caacefae8e567","src/debt/fast.rs":"7ca64acd7f2942707693b6e7476dce7e1bbbcc007129da11142f119fa86e29de","src/debt/helping.rs":"0e0974ba6813a5d28fa974f6bfd9e1c3e35cd480252a7193d091e0a1ff153c03","src/debt/list.rs":"c67fcbfc4fec9158273d0e6b6f1e4168c0e74107516695f43bc374a67968b612","src/debt/mod.rs":"1aa6687a04fd552288d983c1b0ecc8e23cdf14821711f4c82b183f3b64a5628c","src/docs/internal.rs":"4f869ecd5152f45157e5bc6922d0274639cfb389c7172402de9b48d50c26db8b","src/docs/limitations.rs":"b177c990433a8a9b01cd426911c8a57160619a3644b0b82d031484f14d9267a2","src/docs/mod.rs":"c987f5ddf7d6bdc8fa262d6574d353d92a08675e2b521a341636eb0dc129feaa","src/docs/patterns.rs":"07840de45405246fc232fd73061958bd7cb0997150271fd98e153ce9788da390","src/docs/performance.rs":"c9be1
f43c67ef26b56f3a32a1231f18a015c83b829276e8c5a8949d0d8ef9f17","src/lib.rs":"3efa46b8bb0153130b4a25ea6779cfeee0de431cb38b6981625ebb993dd2dde1","src/ref_cnt.rs":"8b540a21cbdf7e6f5aff2441b15ca410bf3eba4dfa882aad22accda13bd91995","src/serde.rs":"b1bf117da9e37e85ae6c0bdc281ace575a2030195706d32679d6e54b27522698","src/strategy/hybrid.rs":"9d9a9d1d17d0ad5756de2724699ea7feeea662c9c02738f613d956a5ae0470ed","src/strategy/mod.rs":"0bd567b1be128919c24f057c5f770b6648a9777f44be2e83dd8b29f752144fcc","src/strategy/rw_lock.rs":"2e7717cf52283656754921c5fe0463df9928bac0855d9cf5a9d58362e858a4db","src/strategy/test_strategies.rs":"6520ad9bf6eaddcdcd39ee3392dbadd0f176b875b938b158c9e5ed632d064426","src/weak.rs":"98dc326bfdb3ca88500740f803e43cc7edbc6d858cc2924fb801ce4353124dec","tests/random.rs":"606e71a16fa9cf04f986de76f95e0cce90041ac15894e92d8ebe87e2dc71fc7c","tests/stress.rs":"0ae80d9ec294714d2295333e4c5f7a11a3c2e87bc3f107f1c5f37d7dc3dde354"},"package":"7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f"}

View file

@ -1,232 +0,0 @@
# 1.7.1
* Support for no-std builds with the `experimental-thread-local`. Needs nightly
compiler. No stability guarantees with this feature (#93).
# 1.6.0
* Fix a data race reported by MIRI.
* Avoid violating stacked borrows (AFAIK these are still experimental and not
normative, but better safe than sorry). (#80).
* The `AccessConvert` wrapper is needed less often in practice (#77).
# 1.5.1
* bug: Insufficient synchronization on weak platforms (#76).
Never observed in practice (it's suspected practical weak platforms like ARM
are still stronger than the model), but still technically UB.
* docs: Mention triomphe's `ThinArc` around the fat-pointer limitations.
# 1.5.0
* Support serde (by a feature).
# 1.4.0
* Allow const-initializing ArcSwapOption (`const_empty` method).
# 1.3.2
* More helpful description of the `AsRaw` trait (isn't implemented for owned
`Arc`/`Option<Arc>`).
# 1.3.1
* Cache doc improvements.
# 1.3.0
* Allow mapping of DynAccess.
* Fix some lints.
* Don't leave threads running in tests/doctests. It's a bad form and annoys
miri.
# 1.2.0
* Miri and 32 bit tests in CI.
* Making the writers lock-free. Soft-removing the IndependentStrategy, as it is
no longer needed (hidden and the same as the DafultStrategy).
# 1.1.0
* Fix soundness bug around access::Map. Technically a breaking change, but
unlikely to bite and breaking seems to be the least bad option. #45.
# 1.0.0
* Remove Clone implementation. People are often confused by it and it is easy to
emulate by hand in the rare case it is actually needed.
# 1.0.0-rc1
* Get rid of the `load_signal_safe`. It only complicates things and it is niche;
signal-hook-registry has its own simplified version.
* Avoid `from_ptr(as_ptr())`. Slight change in `RefCnt::inc` which technically
is API breaking change, but this one should not matter in practice.
* Extend documentation about clone behaviour.
* Few more traits for Guard (`From<T: RefCnt>`, `Default`).
* Get rid of `rcu_unwap`, the whole concept is a trap.
* Hide the whole gen lock thing.
* Introduce the `Strategy`, as a high level way to choose how exactly the
locking happens.
- Not possible to implement by downstream users just yet, or call them.
- The CaS is its own trait for flexibility.
* Adding the SimpleGenLock experimental strategy.
- Not part of stability guarantees.
# 0.4.7
* Rename the `unstable-weak` to `weak` feature. The support is now available on
1.45 (currently in beta).
# 0.4.6
* Adjust to `Weak::as_ptr` from std (the weak pointer support, relying on
unstable features).
* Support running on miri (without some optimizations), so dependencies may run
miri tests.
* Little optimization when waiting out the contention on write operations.
# 0.4.5
* Added `Guard::from_inner`.
# 0.4.4
* Top-level docs rewrite (less rambling, hopefully more readable).
# 0.4.3
* Fix the `Display` implementation on `Guard` to correctly delegate to the
underlying `Display` implementation.
# 0.4.2
* The Access functionality ability to pass a handle to subpart of held data to
somewhere with the ability to update itself.
* Mapped cache can take `FnMut` as well as `Fn`.
# 0.4.1
* Mapped caches to allow giving access to parts of config only.
# 0.4.0
* Support for Weak pointers.
* RefCnt implemented for Rc.
* Breaking: Big API cleanups.
- Peek is gone.
- Terminology of getting the data unified to `load`.
- There's only one kind of `Guard` now.
- Guard derefs to the `Arc`/`Option<Arc>` or similar.
- `Cache` got moved to top level of the crate.
- Several now unneeded semi-internal traits and trait methods got removed.
* Splitting benchmarks into a separate sub-crate.
* Minor documentation improvements.
# 0.3.11
* Prevention against UB due to dropping Guards and overflowing the guard
counter (aborting instead, such problem is very degenerate anyway and wouldn't
work in the first place).
# 0.3.10
* Tweak slot allocation to take smaller performance hit if some leases are held.
* Increase the number of lease slots per thread to 8.
* Added a cache for faster access by keeping an already loaded instance around.
# 0.3.9
* Fix Send/Sync for Guard and Lease (they were broken in the safe but
uncomfortable direction not implementing them even if they could).
# 0.3.8
* `Lease<Option<_>>::unwrap()`, `expect()` and `into_option()` for convenient
use.
# 0.3.7
* Use the correct `#[deprecated]` syntax.
# 0.3.6
* Another locking store (`PrivateSharded`) to complement the global and private
unsharded ones.
* Comparison to other crates/approaches in the docs.
# 0.3.5
* Updates to documentation, made it hopefully easier to digest.
* Added the ability to separate gen-locks of one ArcSwapAny from others.
* Some speed improvements by inlining.
* Simplified the `lease` method internally, making it faster in optimistic
cases.
# 0.3.4
* Another potentially weak ordering discovered (with even less practical effect
than the previous).
# 0.3.3
* Increased potentially weak ordering (probably without any practical effect).
# 0.3.2
* Documentation link fix.
# 0.3.1
* Few convenience constructors.
* More tests (some randomized property testing).
# 0.3.0
* `compare_and_swap` no longer takes `&Guard` as current as that is a sure way
to create a deadlock.
* Introduced `Lease` for temporary storage, which doesn't suffer from contention
like `load`, but doesn't block writes like `Guard`. The downside is it slows
down with number of held by the current thread.
* `compare_and_swap` and `rcu` uses leases.
* Made the `ArcSwap` as small as the pointer itself, by making the
shards/counters and generation ID global. This comes at a theoretical cost of
more contention when different threads use different instances.
# 0.2.0
* Added an `ArcSwapOption`, which allows storing NULL values (as None) as well
as a valid pointer.
* `compare_and_swap` accepts borrowed `Arc` as `current` and doesn't consume one
ref count.
* Sharding internal counters, to improve performance on read-mostly contented
scenarios.
* Providing `peek_signal_safe` as the only async signal safe method to use
inside signal handlers. This removes the footgun with dropping the `Arc`
returned from `load` inside a signal handler.
# 0.1.4
* The `peek` method to use the `Arc` inside without incrementing the reference
count.
* Some more (and hopefully better) benchmarks.
# 0.1.3
* Documentation fix (swap is *not* lock-free in current implementation).
# 0.1.2
* More freedom in the `rcu` and `rcu_unwrap` return types.
# 0.1.1
* `rcu` support.
* `compare_and_swap` support.
* Added some primitive benchmarks.
# 0.1.0
* Initial implementation.

View file

@ -1,91 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "arc-swap"
version = "1.7.0"
authors = ["Michal 'vorner' Vaner <vorner@vorner.cz>"]
description = "Atomically swappable Arc"
documentation = "https://docs.rs/arc-swap"
readme = "README.md"
keywords = [
"atomic",
"Arc",
]
categories = [
"data-structures",
"memory-management",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/vorner/arc-swap"
[package.metadata.docs.rs]
all-features = true
[profile.bench]
debug = 2
[[bench]]
name = "background"
harness = false
[[bench]]
name = "int-access"
harness = false
[[bench]]
name = "track"
harness = false
[dependencies.serde]
version = "1"
features = ["rc"]
optional = true
[dev-dependencies.adaptive-barrier]
version = "~1"
[dev-dependencies.criterion]
version = "~0.5"
[dev-dependencies.crossbeam-utils]
version = "~0.8"
[dev-dependencies.itertools]
version = "0.12"
[dev-dependencies.num_cpus]
version = "~1"
[dev-dependencies.once_cell]
version = "~1"
[dev-dependencies.parking_lot]
version = "~0.12"
[dev-dependencies.proptest]
version = "1"
[dev-dependencies.serde_derive]
version = "1.0.130"
[dev-dependencies.serde_test]
version = "1.0.130"
[features]
experimental-strategies = []
experimental-thread-local = []
internal-test-strategies = []
weak = []
[badges.maintenance]
status = "actively-developed"

View file

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,25 +0,0 @@
Copyright (c) 2017 arc-swap developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View file

@ -1,41 +0,0 @@
# ArcSwap
[![Actions Status](https://github.com/vorner/arc-swap/workflows/test/badge.svg)](https://github.com/vorner/arc-swap/actions)
[![codecov](https://codecov.io/gh/vorner/arc-swap/branch/master/graph/badge.svg?token=3KA3R2D9fV)](https://codecov.io/gh/vorner/arc-swap)
[![docs](https://docs.rs/arc-swap/badge.svg)](https://docs.rs/arc-swap)
This provides something similar to what `RwLock<Arc<T>>` is or what
`Atomic<Arc<T>>` would be if it existed, optimized for read-mostly write-seldom
scenarios, with consistent performance characteristics.
Read [the documentation](https://docs.rs/arc-swap) before using.
## Rust version policy
The 1. version will build on any edition 2018 capable compiler. This does not
include:
* Tests. Tests build and run on recent compilers, mostly because of
dependencies.
* Additional feature flags. Most feature flags are guaranteed to build since the
version they are introduced. Experimental features are without any guarantees.
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms
or conditions.
[`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
[`AtomicPtr`]: https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html
[`ArcSwap`]: https://docs.rs/arc-swap/*/arc_swap/type.ArcSwap.html

View file

@ -1 +0,0 @@
* A cache without the thing inside passed to load every time. Possibly with multiple cached values.

View file

@ -1,335 +0,0 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use arc_swap::{ArcSwap, ArcSwapOption, Cache};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use crossbeam_utils::thread;
use once_cell::sync::Lazy;
// Mostly a leftover from earlier times, but it still allows one to tweak the number of ops per one
// iteration of the benchmark easily, so it's left in here.
const ITERS: usize = 1;
macro_rules! method {
($c: expr, $name:ident) => {{
let mut g = $c.benchmark_group(&format!("{}_{}", NAME, stringify!($name)));
noise(&mut g, "r1", 1, 0, 0, $name);
noise(&mut g, "r3", 3, 0, 0, $name);
noise(&mut g, "l1", 0, 1, 0, $name);
noise(&mut g, "l3", 0, 3, 0, $name);
noise(&mut g, "rw", 1, 0, 1, $name);
noise(&mut g, "lw", 0, 1, 1, $name);
noise(&mut g, "w2", 0, 0, 2, $name);
g.bench_function("uncontended", |b| b.iter($name));
g.finish();
}};
}
macro_rules! noise {
() => {
use criterion::measurement::Measurement;
use criterion::BenchmarkGroup;
use super::{thread, Arc, AtomicBool, Ordering, ITERS};
fn noise<M: Measurement, F: Fn()>(
g: &mut BenchmarkGroup<M>,
name: &str,
readers: usize,
leasers: usize,
writers: usize,
f: F,
) {
let flag = Arc::new(AtomicBool::new(true));
thread::scope(|s| {
for _ in 0..readers {
s.spawn(|_| {
while flag.load(Ordering::Relaxed) {
read();
}
});
}
for _ in 0..leasers {
s.spawn(|_| {
while flag.load(Ordering::Relaxed) {
lease();
}
});
}
for _ in 0..writers {
s.spawn(|_| {
while flag.load(Ordering::Relaxed) {
write();
}
});
}
g.bench_function(name, |b| b.iter(&f));
flag.store(false, Ordering::Relaxed);
})
.unwrap();
}
};
}
macro_rules! strategy {
($name: ident, $type: ty) => {
mod $name {
use super::*;
static A: Lazy<$type> = Lazy::new(|| <$type>::from_pointee(0));
const NAME: &str = stringify!($name);
fn lease() {
for _ in 0..ITERS {
black_box(**A.load());
}
}
// Leases kind of degrade in performance if there are multiple on the same thread.
fn four_leases() {
for _ in 0..ITERS {
let l1 = A.load();
let l2 = A.load();
let l3 = A.load();
let l4 = A.load();
black_box((**l1, **l2, **l3, **l4));
}
}
fn read() {
for _ in 0..ITERS {
black_box(A.load_full());
}
}
fn write() {
for _ in 0..ITERS {
black_box(A.store(Arc::new(0)));
}
}
noise!();
pub fn run_all(c: &mut Criterion) {
method!(c, read);
method!(c, write);
method!(c, lease);
method!(c, four_leases);
}
}
};
}
strategy!(arc_swap_b, ArcSwap::<usize>);
mod arc_swap_option {
use super::{black_box, ArcSwapOption, Criterion, Lazy};
static A: Lazy<ArcSwapOption<usize>> = Lazy::new(|| ArcSwapOption::from(None));
const NAME: &str = "arc_swap_option";
fn lease() {
for _ in 0..ITERS {
black_box(A.load().as_ref().map(|l| **l).unwrap_or(0));
}
}
fn read() {
for _ in 0..ITERS {
black_box(A.load_full().map(|a| -> usize { *a }).unwrap_or(0));
}
}
fn write() {
for _ in 0..ITERS {
black_box(A.store(Some(Arc::new(0))));
}
}
noise!();
pub fn run_all(c: &mut Criterion) {
method!(c, read);
method!(c, write);
method!(c, lease);
}
}
mod arc_swap_cached {
    use super::{black_box, ArcSwap, Cache, Criterion, Lazy};

    // Shared value read both directly and through a caching handle.
    static A: Lazy<ArcSwap<usize>> = Lazy::new(|| ArcSwap::from_pointee(0));
    const NAME: &str = "arc_swap_cached";

    /// Reads through a thread-local `Cache` handle, cloning the cached `Arc`.
    fn read() {
        let mut cache = Cache::from(&A as &ArcSwap<usize>);
        (0..ITERS).for_each(|_| {
            black_box(Arc::clone(cache.load()));
        });
    }

    /// Plain guarded load, without the cache.
    fn lease() {
        (0..ITERS).for_each(|_| {
            black_box(**A.load());
        });
    }

    /// Replaces the value, which invalidates any caches.
    fn write() {
        (0..ITERS).for_each(|_| {
            black_box(A.store(Arc::new(0)));
        });
    }

    noise!();

    pub fn run_all(c: &mut Criterion) {
        method!(c, read);
        method!(c, write);
    }
}
mod mutex {
    use super::{black_box, Criterion, Lazy, Mutex};

    // Baseline: the same `Arc<usize>` guarded by a std `Mutex`.
    static M: Lazy<Mutex<Arc<usize>>> = Lazy::new(|| Mutex::new(Arc::new(0)));
    const NAME: &str = "mutex";

    /// Locks and dereferences the inner value without cloning.
    fn lease() {
        (0..ITERS).for_each(|_| {
            let guard = M.lock().unwrap();
            black_box(**guard);
        });
    }

    /// Locks and clones the `Arc` out.
    fn read() {
        (0..ITERS).for_each(|_| {
            let guard = M.lock().unwrap();
            black_box(Arc::clone(&*guard));
        });
    }

    /// Locks and swaps in a fresh `Arc`.
    fn write() {
        (0..ITERS).for_each(|_| {
            let mut guard = M.lock().unwrap();
            black_box(*guard = Arc::new(42));
        });
    }

    noise!();

    pub fn run_all(c: &mut Criterion) {
        method!(c, read);
        method!(c, write);
    }
}
mod parking_mutex {
    use super::{black_box, Criterion, Lazy};
    use parking_lot::Mutex as ParkingMutex;

    // Same workloads as `mutex`, but with parking_lot's (non-poisoning) mutex.
    static M: Lazy<ParkingMutex<Arc<usize>>> = Lazy::new(|| ParkingMutex::new(Arc::new(0)));
    const NAME: &str = "parking_mutex";

    /// Locks and dereferences the inner value without cloning.
    fn lease() {
        (0..ITERS).for_each(|_| {
            let guard = M.lock();
            black_box(**guard);
        });
    }

    /// Locks and clones the `Arc` out.
    fn read() {
        (0..ITERS).for_each(|_| {
            let guard = M.lock();
            black_box(Arc::clone(&*guard));
        });
    }

    /// Locks and swaps in a fresh `Arc`.
    fn write() {
        (0..ITERS).for_each(|_| {
            let mut guard = M.lock();
            black_box(*guard = Arc::new(42));
        });
    }

    noise!();

    pub fn run_all(c: &mut Criterion) {
        method!(c, read);
        method!(c, write);
    }
}
mod rwlock {
    use super::{black_box, Criterion, Lazy};
    use std::sync::RwLock;

    // Baseline: the same `Arc<usize>` behind a std reader-writer lock.
    static L: Lazy<RwLock<Arc<usize>>> = Lazy::new(|| RwLock::new(Arc::new(0)));
    const NAME: &str = "rwlock";

    /// Takes the read lock and dereferences without cloning.
    fn lease() {
        (0..ITERS).for_each(|_| {
            let guard = L.read().unwrap();
            black_box(**guard);
        });
    }

    /// Takes the read lock and clones the `Arc` out.
    fn read() {
        (0..ITERS).for_each(|_| {
            let guard = L.read().unwrap();
            black_box(Arc::clone(&*guard));
        });
    }

    /// Takes the write lock and swaps in a fresh `Arc`.
    fn write() {
        (0..ITERS).for_each(|_| {
            let mut guard = L.write().unwrap();
            black_box(*guard = Arc::new(42));
        });
    }

    noise!();

    pub fn run_all(c: &mut Criterion) {
        method!(c, read);
        method!(c, write);
    }
}
mod parking_rwlock {
    use super::{black_box, Criterion, Lazy};
    use parking_lot::RwLock;

    // Same workloads as `rwlock`, but with parking_lot's RwLock.
    static L: Lazy<RwLock<Arc<usize>>> = Lazy::new(|| RwLock::new(Arc::new(0)));
    const NAME: &str = "parking_rwlock";

    /// Takes the read lock and dereferences without cloning.
    fn lease() {
        (0..ITERS).for_each(|_| {
            let guard = L.read();
            black_box(**guard);
        });
    }

    /// Takes the read lock and clones the `Arc` out.
    fn read() {
        (0..ITERS).for_each(|_| {
            let guard = L.read();
            black_box(Arc::clone(&*guard));
        });
    }

    /// Takes the write lock and swaps in a fresh `Arc`.
    fn write() {
        (0..ITERS).for_each(|_| {
            let mut guard = L.write();
            black_box(*guard = Arc::new(42));
        });
    }

    noise!();

    pub fn run_all(c: &mut Criterion) {
        method!(c, read);
        method!(c, write);
    }
}
// Register every strategy's suite with criterion; order here is the order
// the groups run and appear in the report.
criterion_group!(
    benches,
    arc_swap_b::run_all,
    arc_swap_option::run_all,
    arc_swap_cached::run_all,
    mutex::run_all,
    parking_mutex::run_all,
    rwlock::run_all,
    parking_rwlock::run_all,
);
criterion_main!(benches);

View file

@ -1,130 +0,0 @@
//! These are very minimal benchmarks reading and writing an integer shared in
//! different ways. You can compare the times and see the characteristics.
use std::io::{self, Write};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Instant;
use arc_swap::ArcSwap;
use criterion::black_box;
use crossbeam_utils::thread;
/// Runs one measurement: `read_threads` threads calling `r` and
/// `write_threads` threads calling `w`, each `iterations` times, and prints
/// the wall-clock duration of the whole concurrent run.
///
/// Readers and writers run simultaneously inside a crossbeam scope, so the
/// printed time includes contention between them.
fn test_run<R, W>(
    name: &str,
    read_threads: usize,
    write_threads: usize,
    iterations: usize,
    r: R,
    w: W,
) where
    R: Fn() -> usize + Sync + Send,
    W: Fn(usize) + Sync + Send,
{
    print!(
        "{:20} ({} + {}) x {}: ",
        name, read_threads, write_threads, iterations
    );
    // Flush so the label appears before the (potentially long) run starts.
    io::stdout().flush().unwrap();
    let before = Instant::now();
    thread::scope(|scope| {
        for _ in 0..read_threads {
            scope.spawn(|_| {
                for _ in 0..iterations {
                    black_box(r());
                }
            });
        }
        for _ in 0..write_threads {
            scope.spawn(|_| {
                for i in 0..iterations {
                    black_box(w(i));
                }
            });
        }
    })
    .unwrap();
    let duration = Instant::now() - before;
    // BUG FIX: the fractional part used to be `subsec_nanos() / 100_000`,
    // which produces 0..=9999 (units of 0.1 ms). That overflows the 3-digit
    // `{:03}` field and reads as roughly 10x the real time (e.g. 1.2 ms
    // printed as ".012"). `subsec_millis()` is always 0..=999 and matches
    // the intended seconds.milliseconds format.
    println!(
        "{:03}.{:03}s",
        duration.as_secs(),
        duration.subsec_millis()
    );
}
fn test_round<R, W>(name: &str, iterations: usize, r: R, w: W)
where
R: Fn() -> usize + Sync + Send,
W: Fn(usize) + Sync + Send,
{
test_run(name, 1, 0, iterations, &r, &w);
test_run(name, 2, 0, iterations, &r, &w);
test_run(name, 4, 0, iterations, &r, &w);
test_run(name, 8, 0, iterations, &r, &w);
test_run(name, 1, 1, iterations, &r, &w);
test_run(name, 4, 1, iterations, &r, &w);
test_run(name, 4, 2, iterations, &r, &w);
test_run(name, 4, 4, iterations, &r, &w);
test_run(name, 8, 1, iterations, &r, &w);
test_run(name, 8, 2, iterations, &r, &w);
test_run(name, 8, 4, iterations, &r, &w);
test_run(name, 0, 1, iterations, &r, &w);
test_run(name, 0, 4, iterations, &r, &w);
}
/// Entry point: runs every synchronization strategy through the same matrix
/// of reader/writer combinations so the printed timings can be compared.
fn main() {
    // Plain value behind a std mutex.
    let mutex = Mutex::new(42);
    test_round(
        "mutex",
        100_000,
        || *mutex.lock().unwrap(),
        |i| *mutex.lock().unwrap() = i,
    );
    // Arc behind a mutex: dereference vs. clone on the read side.
    let mutex = Mutex::new(Arc::new(42));
    test_round(
        "mutex-arc",
        100_000,
        || **mutex.lock().unwrap(),
        |i| *mutex.lock().unwrap() = Arc::new(i),
    );
    test_round(
        "mutex-arc-clone",
        100_000,
        || *Arc::clone(&*mutex.lock().unwrap()),
        |i| *mutex.lock().unwrap() = Arc::new(i),
    );
    // The same three variants with a reader-writer lock.
    let lock = RwLock::new(42);
    test_round(
        "rw",
        100_000,
        || *lock.read().unwrap(),
        |i| *lock.write().unwrap() = i,
    );
    let lock = RwLock::new(Arc::new(42));
    test_round(
        "rw-arc",
        100_000,
        || **lock.read().unwrap(),
        |i| *lock.write().unwrap() = Arc::new(i),
    );
    test_round(
        "rw-arc-clone",
        100_000,
        || *Arc::clone(&*lock.read().unwrap()),
        |i| *lock.write().unwrap() = Arc::new(i),
    );
    // ArcSwap: lock-free load/store, then the rcu read-copy-update loop.
    let arc = ArcSwap::from(Arc::new(42));
    test_round(
        "arc-load-store",
        100_000,
        || **arc.load(),
        |i| arc.store(Arc::new(i)),
    );
    test_round(
        "arc-rcu",
        100_000,
        || *arc.load_full(),
        |i| {
            arc.rcu(|_| Arc::new(i));
        },
    );
}

View file

@ -1,113 +0,0 @@
//! Benchmarks to track basic performance across changes.
//!
//! Slightly based on the <background.rs> benchmarks, but simplified and stripped down to run
//! reasonably fast.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use arc_swap::access::{Access, Map};
use arc_swap::cache::Cache;
use arc_swap::ArcSwap;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use crossbeam_utils::thread;
/// Execute a group of measurements
///
/// It expects any kind of „environment“ is already in place for it.
fn batch(c: &mut Criterion, name: &str, shared_number: &ArcSwap<usize>) {
    let mut g = c.benchmark_group(name);
    // Guarded load — the common fast path.
    g.bench_function("load", |b| {
        b.iter(|| {
            black_box(shared_number.load());
        })
    });
    // Full load — always clones the inner Arc.
    g.bench_function("load_full", |b| {
        b.iter(|| {
            black_box(shared_number.load_full());
        })
    });
    g.bench_function("load_many", |b| {
        // Here we simulate running out of the debt slots scenario
        const MANY: usize = 32;
        let mut guards = Vec::with_capacity(MANY);
        b.iter(|| {
            guards.push(black_box(shared_number.load()));
            if guards.len() == MANY {
                guards.clear();
            }
        })
    });
    // Replacing the stored value.
    g.bench_function("store", |b| {
        b.iter(|| {
            black_box(shared_number.store(Arc::new(42)));
        })
    });
    // Reads through a caching handle, revalidated on each load.
    g.bench_function("cache", |b| {
        let mut cache = Cache::new(shared_number);
        b.iter(|| {
            black_box(cache.load());
        })
    });
    g.finish();
}
/// Runs the standard `batch` of measurements while `cnt` background threads
/// keep calling `noise` on the same `ArcSwap`, to measure behaviour under
/// contention.
fn with_background<F: Fn(&ArcSwap<usize>) + Sync>(
    c: &mut Criterion,
    name: &str,
    cnt: usize,
    noise: F,
) {
    let stop = AtomicBool::new(false);
    let shared_number = ArcSwap::from_pointee(42);
    thread::scope(|s| {
        // Start some background noise threads, to contend the arc swap.
        for _ in 0..cnt {
            s.spawn(|_| {
                while !stop.load(Ordering::Relaxed) {
                    noise(&shared_number);
                }
            });
        }
        // Perform the benchmarks
        batch(c, name, &shared_number);
        // Ask the threads to terminate, so they don't disturb any other benchmarks
        stop.store(true, Ordering::Relaxed);
    })
    .unwrap();
}
/// Benchmarks for the helper utilities (currently the `access::Map`
/// projection wrapper).
fn utilities(c: &mut Criterion) {
    let mut g = c.benchmark_group("utilities");
    struct Composed {
        val: i32,
    }
    g.bench_function("access-map", |b| {
        let a = Arc::new(ArcSwap::from_pointee(Composed { val: 42 }));
        // Project the stored Composed down to its `val` field.
        let m = Map::new(Arc::clone(&a), |c: &Composed| &c.val);
        b.iter(|| {
            let g = black_box(m.load());
            assert_eq!(42, *g);
        });
    });
}
/// Top-level benchmark schedule: uncontended first, then the contended
/// variants, then the utility wrappers.
fn benchmark(c: &mut Criterion) {
    batch(c, "uncontended", &ArcSwap::from_pointee(42));
    with_background(c, "concurrent_loads", 2, |s| {
        black_box(s.load());
    });
    with_background(c, "concurrent_store", 1, |s| {
        black_box(s.store(Arc::new(42)));
    });
    utilities(c);
}
criterion_group!(benches, benchmark);
criterion_main!(benches);

View file

@ -1,16 +0,0 @@
#!/bin/sh
# CI test driver: build first (warnings tolerated), then run the full test
# suite with warnings denied where the compiler supports it.
set -ex

# Start from a fresh dependency resolution.
rm -f Cargo.lock
cargo build

# The old pinned compiler only needs to prove the crate still builds;
# the feature-gated test suite is skipped there.
if [ "$RUST_VERSION" = 1.31.0 ] ; then
exit
fi

# Allow some warnings on the very old compiler; deny them everywhere else.
export RUSTFLAGS="-D warnings"
cargo test --release --features weak,internal-test-strategies,experimental-strategies
cargo test --release --features weak,internal-test-strategies,experimental-strategies -- --ignored

View file

@ -1,546 +0,0 @@
#![deny(unsafe_code)]
//! Abstracting over accessing parts of stored value.
//!
//! Sometimes, there's a big globalish data structure (like a configuration for the whole program).
//! Then there are parts of the program that need access to up-to-date version of their *part* of
//! the configuration, but for reasons of code separation and reusability, it is not desirable to
//! pass the whole configuration to each of the parts.
//!
//! This module provides means to grant the parts access to the relevant subsets of such global
//! data structure while masking the fact it is part of the bigger whole from the component.
//!
//! Note that the [`cache`][crate::cache] module has its own [`Access`][crate::cache::Access] trait
//! that serves a similar purpose, but with cached access. The signatures are different, therefore
//! an incompatible trait.
//!
//! # The general idea
//!
//! Each part of the code accepts generic [`Access<T>`][Access] for the `T` of its interest. This
//! provides means to load current version of the structure behind the scenes and get only the
//! relevant part, without knowing what the big structure is.
//!
//! For technical reasons, the [`Access`] trait is not object safe. If type erasure is desired, it
//! is possible use the [`DynAccess`][crate::access::DynAccess] instead, which is object safe, but
//! slightly slower.
//!
//! For some cases, it is possible to use [`ArcSwapAny::map`]. If that is not flexible enough, the
//! [`Map`] type can be created directly.
//!
//! Note that the [`Access`] trait is also implemented for [`ArcSwapAny`] itself. Additionally,
//! there's the [`Constant`][crate::access::Constant] helper type, which is useful mostly for
//! testing (it doesn't allow reloading).
//!
//! # Performance
//!
//! In general, these utilities use [`ArcSwapAny::load`] internally and then apply the provided
//! transformation. This has several consequences:
//!
//! * Limitations of the [`load`][ArcSwapAny::load] apply including the recommendation to not
//! hold the returned guard object for too long, but long enough to get consistency.
//! * The transformation should be cheap optimally just borrowing into the structure.
//!
//! # Examples
//!
//! ```rust
//! use std::sync::Arc;
//! use std::thread::{self, JoinHandle};
//! use std::time::Duration;
//!
//! use arc_swap::ArcSwap;
//! use arc_swap::access::{Access, Constant, Map};
//!
//! fn work_with_usize<A: Access<usize> + Send + 'static>(a: A) -> JoinHandle<()> {
//! thread::spawn(move || {
//! let mut value = 0;
//! while value != 42 {
//! let guard = a.load();
//! value = *guard;
//! println!("{}", value);
//! // Not strictly necessary, but dropping the guard can free some resources, like
//! // slots for tracking what values are still in use. We do it before the sleeping,
//! // not at the end of the scope.
//! drop(guard);
//! thread::sleep(Duration::from_millis(50));
//! }
//! })
//! }
//!
//! // Passing the whole thing directly
//! // (If we kept another Arc to it, we could change the value behind the scenes)
//! work_with_usize(Arc::new(ArcSwap::from_pointee(42))).join().unwrap();
//!
//! // Passing a subset of a structure
//! struct Cfg {
//! value: usize,
//! }
//!
//! let cfg = Arc::new(ArcSwap::from_pointee(Cfg { value: 0 }));
//! let thread = work_with_usize(Map::new(Arc::clone(&cfg), |cfg: &Cfg| &cfg.value));
//! cfg.store(Arc::new(Cfg { value: 42 }));
//! thread.join().unwrap();
//!
//! // Passing a constant that can't change. Useful mostly for testing purposes.
//! work_with_usize(Constant(42)).join().unwrap();
//! ```
use core::marker::PhantomData;
use core::ops::Deref;
use alloc::boxed::Box;
use alloc::rc::Rc;
use alloc::sync::Arc;
use super::ref_cnt::RefCnt;
use super::strategy::Strategy;
use super::{ArcSwapAny, Guard};
/// Abstracts over ways code can get access to a value of type `T`.
///
/// This is the trait that parts of code will use when accessing a subpart of the big data
/// structure. See the [module documentation](index.html) for details.
pub trait Access<T> {
    /// A guard object containing the value and keeping it alive.
    ///
    /// For technical reasons, the library doesn't allow direct access into the stored value. A
    /// temporary guard object must be loaded, that keeps the actual value alive for the time of
    /// use.
    type Guard: Deref<Target = T>;
    /// The loading method.
    ///
    /// This returns the guard that holds the actual value. Should be called anew each time a fresh
    /// value is needed.
    fn load(&self) -> Self::Guard;
}
// Blanket impl: anything that dereferences to an `Access` (eg. `&A`,
// `Box<A>`, `Arc<A>`) is itself an `Access`, delegating to the target.
impl<T, A: Access<T> + ?Sized, P: Deref<Target = A>> Access<T> for P {
    type Guard = A::Guard;
    fn load(&self) -> Self::Guard {
        self.deref().load()
    }
}
// The next three impls make `DynAccess` trait objects usable directly as
// `Access`, one impl per combination of auto-trait bounds on the object.
impl<T> Access<T> for dyn DynAccess<T> + '_ {
    type Guard = DynGuard<T>;
    fn load(&self) -> Self::Guard {
        self.load()
    }
}
impl<T> Access<T> for dyn DynAccess<T> + '_ + Send {
    type Guard = DynGuard<T>;
    fn load(&self) -> Self::Guard {
        self.load()
    }
}
impl<T> Access<T> for dyn DynAccess<T> + '_ + Sync + Send {
    type Guard = DynGuard<T>;
    fn load(&self) -> Self::Guard {
        self.load()
    }
}
// An `ArcSwapAny` itself provides access to the value it stores.
impl<T: RefCnt, S: Strategy<T>> Access<T> for ArcSwapAny<T, S> {
    type Guard = Guard<T, S>;
    fn load(&self) -> Self::Guard {
        self.load()
    }
}
// Guard wrapper that dereferences one level deeper: straight to the `T`
// inside the stored `Arc<T>`/`Rc<T>` instead of to the smart pointer.
#[derive(Debug)]
#[doc(hidden)]
pub struct DirectDeref<T: RefCnt, S: Strategy<T>>(Guard<T, S>);
impl<T, S: Strategy<Arc<T>>> Deref for DirectDeref<Arc<T>, S> {
    type Target = T;
    fn deref(&self) -> &T {
        // Guard -> Arc<T> -> T
        self.0.deref().deref()
    }
}
// Specialized `Access<T>` (not `Access<Arc<T>>`) for swaps holding `Arc<T>`.
impl<T, S: Strategy<Arc<T>>> Access<T> for ArcSwapAny<Arc<T>, S> {
    type Guard = DirectDeref<Arc<T>, S>;
    fn load(&self) -> Self::Guard {
        DirectDeref(self.load())
    }
}
impl<T, S: Strategy<Rc<T>>> Deref for DirectDeref<Rc<T>, S> {
    type Target = T;
    fn deref(&self) -> &T {
        self.0.deref().deref()
    }
}
// The same specialization for swaps holding `Rc<T>`.
impl<T, S: Strategy<Rc<T>>> Access<T> for ArcSwapAny<Rc<T>, S> {
    type Guard = DirectDeref<Rc<T>, S>;
    fn load(&self) -> Self::Guard {
        DirectDeref(self.load())
    }
}
// Boxed, type-erased guard returned by `DynAccess::load`.
#[doc(hidden)]
pub struct DynGuard<T: ?Sized>(Box<dyn Deref<Target = T>>);
impl<T: ?Sized> Deref for DynGuard<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}
/// An object-safe version of the [`Access`] trait.
///
/// This can be used instead of the [`Access`] trait in case a type erasure is desired. This has
/// the effect of performance hit (due to boxing of the result and due to dynamic dispatch), but
/// makes certain code simpler and possibly makes the executable smaller.
///
/// This is automatically implemented for everything that implements [`Access`].
///
/// # Examples
///
/// ```rust
/// use arc_swap::access::{Constant, DynAccess};
///
/// fn do_something(value: Box<dyn DynAccess<usize> + Send>) {
///     let v = value.load();
///     println!("{}", *v);
/// }
///
/// do_something(Box::new(Constant(42)));
/// ```
pub trait DynAccess<T> {
    /// The equivalent of [`Access::load`].
    fn load(&self) -> DynGuard<T>;
}
// Blanket impl: any `Access` whose guard is `'static` can be type-erased by
// boxing the guard behind `DynGuard`.
impl<T, A> DynAccess<T> for A
where
    A: Access<T>,
    A::Guard: 'static,
{
    fn load(&self) -> DynGuard<T> {
        DynGuard(Box::new(Access::load(self)))
    }
}
/// [DynAccess] to [Access] wrapper.
///
/// In previous versions, `Box<dyn DynAccess>` didn't implement [Access], to use inside [Map] one
/// could use this wrapper. Since then, a way was found to solve it. In most cases, this wrapper is
/// no longer necessary.
///
/// This is left in place for two reasons:
/// * Backwards compatibility.
/// * Corner-cases not covered by the found solution. For example, trait inheritance in the form of
///   `Box<dyn SomeTrait>` where `SomeTrait: Access` doesn't work out of the box and still needs
///   this wrapper.
///
/// # Examples
///
/// The example is for the simple case (which is no longer needed, but may help as an inspiration).
///
/// ```rust
/// use std::sync::Arc;
///
/// use arc_swap::ArcSwap;
/// use arc_swap::access::{AccessConvert, DynAccess, Map};
///
/// struct Inner {
///     val: usize,
/// }
///
/// struct Middle {
///     inner: Inner,
/// }
///
/// struct Outer {
///     middle: Middle,
/// }
///
/// let outer = Arc::new(ArcSwap::from_pointee(Outer {
///     middle: Middle {
///         inner: Inner {
///             val: 42,
///         }
///     }
/// }));
///
/// let middle: Arc<dyn DynAccess<Middle>> =
///     Arc::new(Map::new(outer, |outer: &Outer| &outer.middle));
/// let inner: Arc<dyn DynAccess<Inner>> =
///     Arc::new(Map::new(AccessConvert(middle), |middle: &Middle| &middle.inner));
/// let guard = inner.load();
/// assert_eq!(42, guard.val);
/// ```
pub struct AccessConvert<D>(pub D);
// Forwards `Access::load` to the wrapped `DynAccess` behind the pointer.
impl<T, D> Access<T> for AccessConvert<D>
where
    D: Deref,
    D::Target: DynAccess<T>,
{
    type Guard = DynGuard<T>;
    fn load(&self) -> Self::Guard {
        self.0.load()
    }
}
// Guard of [`Map`]: holds the inner guard alive and applies the projection
// on every dereference.
#[doc(hidden)]
#[derive(Copy, Clone, Debug)]
pub struct MapGuard<G, F, T, R> {
    guard: G,
    projection: F,
    // Variance marker only; the guard does not own a T or R directly.
    _t: PhantomData<fn(&T) -> &R>,
}
impl<G, F, T, R> Deref for MapGuard<G, F, T, R>
where
    G: Deref<Target = T>,
    F: Fn(&T) -> &R,
{
    type Target = R;
    fn deref(&self) -> &R {
        (self.projection)(&self.guard)
    }
}
/// An adaptor to provide access to a part of larger structure.
///
/// This is the *active* part of this module. Use the [module documentation](index.html) for the
/// details.
#[derive(Copy, Clone, Debug)]
pub struct Map<A, T, F> {
    access: A,
    projection: F,
    _t: PhantomData<fn() -> T>,
}
impl<A, T, F> Map<A, T, F> {
    /// Creates a new instance.
    ///
    /// # Parameters
    ///
    /// * `access`: Access to the bigger structure. This is usually something like `Arc<ArcSwap>`
    ///   or `&ArcSwap`. It is technically possible to use any other [`Access`] here, though, for
    ///   example to sub-delegate into even smaller structure from a [`Map`] (or generic
    ///   [`Access`]).
    /// * `projection`: A function (or closure) responsible for providing a reference into the
    ///   bigger structure, selecting just a subset of it. In general, it is expected to be
    ///   *cheap* (like only taking reference).
    pub fn new<R>(access: A, projection: F) -> Self
    where
        F: Fn(&T) -> &R + Clone,
    {
        Map {
            access,
            projection,
            _t: PhantomData,
        }
    }
}
impl<A, F, T, R> Access<R> for Map<A, T, F>
where
    A: Access<T>,
    F: Fn(&T) -> &R + Clone,
{
    type Guard = MapGuard<A::Guard, F, T, R>;
    fn load(&self) -> Self::Guard {
        // Load the full value, then wrap it together with a clone of the
        // projection; the projection is applied lazily on deref.
        let guard = self.access.load();
        MapGuard {
            guard,
            projection: self.projection.clone(),
            _t: PhantomData,
        }
    }
}
// Trivial guard around an owned value; `Constant::load` returns this.
#[doc(hidden)]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct ConstantDeref<T>(T);
impl<T> Deref for ConstantDeref<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}
/// Access to a constant.
///
/// This wraps a constant value to provide [`Access`] to it. It is constant in the sense that,
/// unlike [`ArcSwapAny`] and [`Map`], the loaded value will always stay the same (there's no
/// remote `store`).
///
/// The purpose is mostly testing and plugging a parameter that works generically from code that
/// doesn't need the updating functionality.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Constant<T>(pub T);
impl<T: Clone> Access<T> for Constant<T> {
    type Guard = ConstantDeref<T>;
    fn load(&self) -> Self::Guard {
        // Each load hands out a fresh clone of the wrapped value.
        ConstantDeref(self.0.clone())
    }
}
// Unit tests covering each dispatch path of the access traits: static,
// dynamic, projected (Map) and constant.
#[cfg(test)]
mod tests {
    use super::super::{ArcSwap, ArcSwapOption};
    use super::*;
    fn check_static_dispatch_direct<A: Access<usize>>(a: A) {
        assert_eq!(42, *a.load());
    }
    fn check_static_dispatch<A: Access<Arc<usize>>>(a: A) {
        assert_eq!(42, **a.load());
    }
    /// Tests dispatching statically from arc-swap works
    #[test]
    fn static_dispatch() {
        let a = ArcSwap::from_pointee(42);
        check_static_dispatch_direct(&a);
        check_static_dispatch(&a);
        check_static_dispatch(a);
    }
    fn check_dyn_dispatch_direct(a: &dyn DynAccess<usize>) {
        assert_eq!(42, *a.load());
    }
    fn check_dyn_dispatch(a: &dyn DynAccess<Arc<usize>>) {
        assert_eq!(42, **a.load());
    }
    /// Tests we can also do a dynamic dispatch of the companion trait
    #[test]
    fn dyn_dispatch() {
        let a = ArcSwap::from_pointee(42);
        check_dyn_dispatch_direct(&a);
        check_dyn_dispatch(&a);
    }
    fn check_transition<A>(a: A)
    where
        A: Access<usize>,
        A::Guard: 'static,
    {
        check_dyn_dispatch_direct(&a)
    }
    /// Tests we can easily transition from the static dispatch trait to the dynamic one
    #[test]
    fn transition() {
        let a = ArcSwap::from_pointee(42);
        check_transition(&a);
        check_transition(a);
    }
    /// Test we can dispatch from Arc<ArcSwap<_>> or similar.
    #[test]
    fn indirect() {
        let a = Arc::new(ArcSwap::from_pointee(42));
        check_static_dispatch(&a);
        check_dyn_dispatch(&a);
    }
    struct Cfg {
        value: usize,
    }
    #[test]
    fn map() {
        let a = ArcSwap::from_pointee(Cfg { value: 42 });
        let map = a.map(|a: &Cfg| &a.value);
        check_static_dispatch_direct(&map);
        check_dyn_dispatch_direct(&map);
    }
    #[test]
    fn map_option_some() {
        let a = ArcSwapOption::from_pointee(Cfg { value: 42 });
        let map = a.map(|a: &Option<Arc<Cfg>>| a.as_ref().map(|c| &c.value).unwrap());
        check_static_dispatch_direct(&map);
        check_dyn_dispatch_direct(&map);
    }
    #[test]
    fn map_option_none() {
        let a = ArcSwapOption::empty();
        let map = a.map(|a: &Option<Arc<Cfg>>| a.as_ref().map(|c| &c.value).unwrap_or(&42));
        check_static_dispatch_direct(&map);
        check_dyn_dispatch_direct(&map);
    }
    #[test]
    fn constant() {
        let c = Constant(42);
        check_static_dispatch_direct(c);
        check_dyn_dispatch_direct(&c);
        // Constant is Copy, so it can be consumed twice.
        check_static_dispatch_direct(c);
    }
    #[test]
    fn map_reload() {
        let a = ArcSwap::from_pointee(Cfg { value: 0 });
        let map = a.map(|cfg: &Cfg| &cfg.value);
        assert_eq!(0, *Access::load(&map));
        a.store(Arc::new(Cfg { value: 42 }));
        assert_eq!(42, *Access::load(&map));
    }
    // Compile tests for dynamic access
    fn _expect_access<T>(_: impl Access<T>) {}
    fn _dyn_access<T>(x: Box<dyn DynAccess<T> + '_>) {
        _expect_access(x)
    }
    fn _dyn_access_send<T>(x: Box<dyn DynAccess<T> + '_ + Send>) {
        _expect_access(x)
    }
    fn _dyn_access_send_sync<T>(x: Box<dyn DynAccess<T> + '_ + Send + Sync>) {
        _expect_access(x)
    }
    #[test]
    #[allow(clippy::arc_with_non_send_sync)] // Whatever, it's tests...
    fn double_dyn_access_complex() {
        struct Inner {
            val: usize,
        }
        struct Middle {
            inner: Inner,
        }
        struct Outer {
            middle: Middle,
        }
        let outer = Arc::new(ArcSwap::from_pointee(Outer {
            middle: Middle {
                inner: Inner { val: 42 },
            },
        }));
        let middle: Arc<dyn DynAccess<Middle>> =
            Arc::new(Map::new(outer, |outer: &Outer| &outer.middle));
        let inner: Arc<dyn DynAccess<Inner>> =
            Arc::new(Map::new(middle, |middle: &Middle| &middle.inner));
        // Damn. We have the DynAccess wrapper in scope and need to disambiguate the inner.load()
        let guard = Access::load(&inner);
        assert_eq!(42, guard.val);
    }
}

View file

@ -1,72 +0,0 @@
use super::{Guard, RefCnt};
// Private module so `Sealed` (and therefore `AsRaw`) cannot be implemented
// outside this crate.
mod sealed {
    pub trait Sealed {}
}
use self::sealed::Sealed;
/// A trait describing things that can be turned into a raw pointer.
///
/// This is just an abstraction of things that can be passed to the
/// [`compare_and_swap`](struct.ArcSwapAny.html#method.compare_and_swap).
///
/// # Examples
///
/// ```
/// use std::ptr;
/// use std::sync::Arc;
///
/// use arc_swap::ArcSwapOption;
///
/// let a = Arc::new(42);
/// let shared = ArcSwapOption::from(Some(Arc::clone(&a)));
///
/// shared.compare_and_swap(&a, Some(Arc::clone(&a)));
/// shared.compare_and_swap(&None::<Arc<_>>, Some(Arc::clone(&a)));
/// shared.compare_and_swap(shared.load(), Some(Arc::clone(&a)));
/// shared.compare_and_swap(&shared.load(), Some(Arc::clone(&a)));
/// shared.compare_and_swap(ptr::null(), Some(Arc::clone(&a)));
/// ```
///
/// Due to technical limitation, this is not implemented for owned `Arc`/`Option<Arc<_>>`, they
/// need to be borrowed.
pub trait AsRaw<T>: Sealed {
    /// Converts the value into a raw pointer.
    fn as_raw(&self) -> *mut T;
}
// Borrowed ref-counted pointers (eg. `&Arc<T>`, `&Option<Arc<T>>`).
impl<'a, T: RefCnt> Sealed for &'a T {}
impl<'a, T: RefCnt> AsRaw<T::Base> for &'a T {
    fn as_raw(&self) -> *mut T::Base {
        T::as_ptr(self)
    }
}
// Guards returned by `load`, both borrowed and owned.
impl<'a, T: RefCnt> Sealed for &'a Guard<T> {}
impl<'a, T: RefCnt> AsRaw<T::Base> for &'a Guard<T> {
    fn as_raw(&self) -> *mut T::Base {
        T::as_ptr(self)
    }
}
impl<T: RefCnt> Sealed for Guard<T> {}
impl<T: RefCnt> AsRaw<T::Base> for Guard<T> {
    fn as_raw(&self) -> *mut T::Base {
        T::as_ptr(self)
    }
}
// Raw pointers pass through unchanged (const ones are cast to mut).
impl<T> Sealed for *mut T {}
impl<T> AsRaw<T> for *mut T {
    fn as_raw(&self) -> *mut T {
        *self
    }
}
impl<T> Sealed for *const T {}
impl<T> AsRaw<T> for *const T {
    fn as_raw(&self) -> *mut T {
        *self as *mut T
    }
}

View file

@ -1,343 +0,0 @@
#![deny(unsafe_code)]
//! Caching handle into the [ArcSwapAny].
//!
//! The [Cache] keeps a copy of the internal [Arc] for faster access.
//!
//! [Arc]: std::sync::Arc
use core::ops::Deref;
use core::sync::atomic::Ordering;
use super::ref_cnt::RefCnt;
use super::strategy::Strategy;
use super::ArcSwapAny;
/// Generalization of caches providing access to `T`.
///
/// This abstracts over all kinds of caches that can provide a cheap access to values of type `T`.
/// This is useful in cases where some code doesn't care if the `T` is the whole structure or just
/// a part of it.
///
/// See the example at [`Cache::map`].
pub trait Access<T> {
    /// Loads the value from cache.
    ///
    /// This revalidates the value in the cache, then provides the access to the cached value.
    fn load(&mut self) -> &T;
}
/// Caching handle for [`ArcSwapAny`][ArcSwapAny].
///
/// Instead of loading the [`Arc`][Arc] on every request from the shared storage, this keeps
/// another copy inside itself. Upon request it only cheaply revalidates it is up to
/// date. If it is, access is significantly faster. If it is stale, the [load_full] is done and the
/// cache value is replaced. Under read-heavy loads, the measured speedups are 10-25 times,
/// depending on the architecture.
///
/// There are, however, downsides:
///
/// * The handle needs to be kept around by the caller (usually, one per thread). This is fine if
///   there's one global `ArcSwapAny`, but starts being tricky with eg. data structures build from
///   them.
/// * As it keeps a copy of the [Arc] inside the cache, the old value may be kept alive for a
///   longer period of time before it is replaced by the new value on [load][Cache::load]. You may
///   not want to use this if dropping the old value in timely manner is important (possibly
///   because of releasing large amount of RAM or because of closing file handles).
///
/// # Examples
///
/// ```rust
/// # fn do_something<V>(_v: V) { }
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// use arc_swap::{ArcSwap, Cache};
///
/// let shared = Arc::new(ArcSwap::from_pointee(42));
/// # let mut threads = Vec::new();
/// let terminate = Arc::new(AtomicBool::new(false));
/// // Start 10 worker threads...
/// for _ in 0..10 {
///     let mut cache = Cache::new(Arc::clone(&shared));
///     let terminate = Arc::clone(&terminate);
///     # let thread =
///     std::thread::spawn(move || {
///         // Keep loading it like mad..
///         while !terminate.load(Ordering::Relaxed) {
///             let value = cache.load();
///             do_something(value);
///         }
///     });
///     # threads.push(thread);
/// }
/// shared.store(Arc::new(12));
/// # terminate.store(true, Ordering::Relaxed);
/// # for thread in threads { thread.join().unwrap() }
/// ```
///
/// Another one with using a thread local storage and explicit types:
///
/// ```rust
/// # use std::sync::Arc;
/// # use std::ops::Deref;
/// # use std::cell::RefCell;
/// #
/// # use arc_swap::ArcSwap;
/// # use arc_swap::cache::Cache;
/// # use once_cell::sync::Lazy;
/// #
/// # #[derive(Debug, Default)]
/// # struct Config;
/// #
/// static CURRENT_CONFIG: Lazy<ArcSwap<Config>> = Lazy::new(|| ArcSwap::from_pointee(Config::default()));
///
/// thread_local! {
///     static CACHE: RefCell<Cache<&'static ArcSwap<Config>, Arc<Config>>> = RefCell::new(Cache::from(CURRENT_CONFIG.deref()));
/// }
///
/// CACHE.with(|c| {
///     // * RefCell needed, because load on cache is `&mut`.
///     // * You want to operate inside the `with` cloning the Arc is comparably expensive as
///     //   ArcSwap::load itself and whatever you'd save by the cache would be lost on that.
///     println!("{:?}", c.borrow_mut().load());
/// });
/// ```
///
/// [Arc]: std::sync::Arc
/// [load_full]: ArcSwapAny::load_full
#[derive(Clone, Debug)]
pub struct Cache<A, T> {
    // The shared storage (or something dereferencing to it).
    arc_swap: A,
    // The locally held copy, revalidated against the storage on each load.
    cached: T,
}
impl<A, T, S> Cache<A, T>
where
    A: Deref<Target = ArcSwapAny<T, S>>,
    T: RefCnt,
    S: Strategy<T>,
{
    /// Creates a new caching handle.
    ///
    /// The parameter is something dereferencing into an [`ArcSwapAny`] (eg. either to [`ArcSwap`]
    /// or [`ArcSwapOption`]). That can be [`ArcSwapAny`] itself, but that's not very useful. But
    /// it also can be a reference to it or `Arc`, which makes it possible to share the
    /// [`ArcSwapAny`] with multiple caches or access it in non-cached way too.
    ///
    /// [`ArcSwapOption`]: crate::ArcSwapOption
    /// [`ArcSwap`]: crate::ArcSwap
    pub fn new(arc_swap: A) -> Self {
        let cached = arc_swap.load_full();
        Self { arc_swap, cached }
    }
    /// Gives access to the (possibly shared) cached [`ArcSwapAny`].
    pub fn arc_swap(&self) -> &A::Target {
        &self.arc_swap
    }
    /// Loads the currently held value.
    ///
    /// This first checks if the cached value is up to date. This check is very cheap.
    ///
    /// If it is up to date, the cached value is simply returned without additional costs. If it is
    /// outdated, a load is done on the underlying shared storage. The newly loaded value is then
    /// stored in the cache and returned.
    #[inline]
    pub fn load(&mut self) -> &T {
        self.revalidate();
        self.load_no_revalidate()
    }
    /// Returns the cached value without checking it against the shared storage.
    #[inline]
    fn load_no_revalidate(&self) -> &T {
        &self.cached
    }
    /// Refreshes the cached copy if the shared pointer has changed.
    #[inline]
    fn revalidate(&mut self) {
        let cached_ptr = RefCnt::as_ptr(&self.cached);
        // Note: Relaxed here is fine. We do not synchronize any data through this, we already have
        // it synchronized in self.cached. We just want to check if it changed, if it did, the
        // load_full will be responsible for any synchronization needed.
        let shared_ptr = self.arc_swap.ptr.load(Ordering::Relaxed);
        if cached_ptr != shared_ptr {
            self.cached = self.arc_swap.load_full();
        }
    }
    /// Turns this cache into a cache with a projection inside the cached value.
    ///
    /// You'd use this in case when some part of code needs access to fresh values of `U`, however
    /// a bigger structure containing `U` is provided by this cache. The possibility of giving the
    /// whole structure to the part of the code falls short in terms of reusability (the part of
    /// the code could be used within multiple contexts, each with a different bigger structure
    /// containing `U`) and code separation (the code shouldn't need to know about the big
    /// structure).
    ///
    /// # Warning
    ///
    /// As the provided `f` is called inside every [`load`][Access::load], this one should be
    /// cheap. Most often it is expected to be just a closure taking reference of some inner field.
    ///
    /// For the same reasons, it should not have side effects and should never panic (these will
    /// not break Rust's safety rules, but might produce behaviour you don't expect).
    ///
    /// # Examples
    ///
    /// ```rust
    /// use arc_swap::ArcSwap;
    /// use arc_swap::cache::{Access, Cache};
    ///
    /// struct InnerCfg {
    ///     answer: usize,
    /// }
    ///
    /// struct FullCfg {
    ///     inner: InnerCfg,
    /// }
    ///
    /// fn use_inner<A: Access<InnerCfg>>(cache: &mut A) {
    ///     let value = cache.load();
    ///     println!("The answer is: {}", value.answer);
    /// }
    ///
    /// let full_cfg = ArcSwap::from_pointee(FullCfg {
    ///     inner: InnerCfg {
    ///         answer: 42,
    ///     }
    /// });
    /// let cache = Cache::new(&full_cfg);
    /// use_inner(&mut cache.map(|full| &full.inner));
    ///
    /// let inner_cfg = ArcSwap::from_pointee(InnerCfg { answer: 24 });
    /// let mut inner_cache = Cache::new(&inner_cfg);
    /// use_inner(&mut inner_cache);
    /// ```
    pub fn map<F, U>(self, f: F) -> MapCache<A, T, F>
    where
        F: FnMut(&T) -> &U,
    {
        MapCache {
            inner: self,
            projection: f,
        }
    }
}
impl<A, T, S> Access<T::Target> for Cache<A, T>
where
A: Deref<Target = ArcSwapAny<T, S>>,
T: Deref<Target = <T as RefCnt>::Base> + RefCnt,
S: Strategy<T>,
{
fn load(&mut self) -> &T::Target {
self.load().deref()
}
}
impl<A, T, S> From<A> for Cache<A, T>
where
A: Deref<Target = ArcSwapAny<T, S>>,
T: RefCnt,
S: Strategy<T>,
{
fn from(arc_swap: A) -> Self {
Self::new(arc_swap)
}
}
/// An implementation of a cache with a projection into the accessed value.
///
/// This is the implementation structure for [`Cache::map`]. It can't be created directly and it
/// should be used through the [`Access`] trait.
#[derive(Clone, Debug)]
pub struct MapCache<A, T, F> {
    // The cache of the full (unprojected) value.
    inner: Cache<A, T>,
    // The projection closure, applied on every load to narrow the cached
    // value down to the part of interest.
    projection: F,
}
impl<A, T, S, F, U> Access<U> for MapCache<A, T, F>
where
    A: Deref<Target = ArcSwapAny<T, S>>,
    T: RefCnt,
    S: Strategy<T>,
    F: FnMut(&T) -> &U,
{
    /// Revalidates the inner cache, then projects the result down to `U`.
    fn load(&mut self) -> &U {
        // Refresh the underlying cache first, then narrow the borrowed value
        // through the stored projection closure.
        let full = self.inner.load();
        (self.projection)(full)
    }
}
#[cfg(test)]
mod tests {
    use alloc::sync::Arc;

    use super::*;
    use crate::{ArcSwap, ArcSwapOption};

    /// Independent caches over the same storage each see stored updates on `load`,
    /// while `load_no_revalidate` returns whatever is currently cached, even if stale.
    #[test]
    fn cached_value() {
        let a = ArcSwap::from_pointee(42);
        let mut c1 = Cache::new(&a);
        let mut c2 = Cache::new(&a);

        assert_eq!(42, **c1.load());
        assert_eq!(42, **c2.load());

        a.store(Arc::new(43));
        // Not revalidated yet -> still the old value.
        assert_eq!(42, **c1.load_no_revalidate());
        assert_eq!(43, **c1.load());
    }

    /// A cache may own its handle (here an `Arc<ArcSwap<_>>`), keeping the storage alive.
    #[test]
    fn cached_through_arc() {
        let a = Arc::new(ArcSwap::from_pointee(42));
        let mut c = Cache::new(Arc::clone(&a));
        assert_eq!(42, **c.load());
        a.store(Arc::new(0));
        drop(a); // A is just one handle, the ArcSwap is kept alive by the cache.
    }

    /// Caching works for `ArcSwapOption` too, including observing `None`.
    #[test]
    fn cache_option() {
        let a = ArcSwapOption::from_pointee(42);
        let mut c = Cache::new(&a);

        assert_eq!(42, **c.load().as_ref().unwrap());
        a.store(None);
        assert!(c.load().is_none());
    }

    // Fixture types for the projection tests below.
    struct Inner {
        answer: usize,
    }

    struct Outer {
        inner: Inner,
    }

    /// Projected caches (`map`) track updates of the full value just like the plain cache.
    #[test]
    fn map_cache() {
        let a = ArcSwap::from_pointee(Outer {
            inner: Inner { answer: 42 },
        });

        let mut cache = Cache::new(&a);
        let mut inner = cache.clone().map(|outer| &outer.inner);
        let mut answer = cache.clone().map(|outer| &outer.inner.answer);

        assert_eq!(42, cache.load().inner.answer);
        assert_eq!(42, inner.load().answer);
        assert_eq!(42, *answer.load());

        a.store(Arc::new(Outer {
            inner: Inner { answer: 24 },
        }));

        assert_eq!(24, cache.load().inner.answer);
        assert_eq!(24, inner.load().answer);
        assert_eq!(24, *answer.load());
    }
}

View file

@ -1,93 +0,0 @@
// The doc tests allow us to do a compile_fail test, which is cool and what we want, but we don't
// want to expose this in the docs, so we use a private struct for that reason.
//
// Note we also bundle one that *does* compile with each, just to make sure they don't silently
// fail to compile for some other reason.
//! ```rust,compile_fail
//! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42));
//! std::thread::spawn(|| {
//! drop(shared);
//! });
//! ```
//!
//! ```rust
//! let shared = arc_swap::ArcSwap::from_pointee(42);
//! std::thread::spawn(|| {
//! drop(shared);
//! })
//! .join()
//! .unwrap();
//! ```
//!
//! ```rust,compile_fail
//! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42));
//! let guard = shared.load();
//! std::thread::spawn(|| {
//! drop(guard);
//! });
//! ```
//!
//! ```rust
//! let shared = arc_swap::ArcSwap::from_pointee(42);
//! let guard = shared.load();
//! std::thread::spawn(|| {
//! drop(guard);
//! })
//! .join()
//! .unwrap();
//! ```
//!
//! ```rust,compile_fail
//! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42));
//! crossbeam_utils::thread::scope(|scope| {
//! scope.spawn(|_| {
//! let _ = &shared;
//! });
//! }).unwrap();
//! ```
//!
//! ```rust
//! let shared = arc_swap::ArcSwap::from_pointee(42);
//! crossbeam_utils::thread::scope(|scope| {
//! scope.spawn(|_| {
//! let _ = &shared;
//! });
//! }).unwrap();
//! ```
//!
//! ```rust,compile_fail
//! let shared = arc_swap::ArcSwap::from_pointee(std::cell::Cell::new(42));
//! let guard = shared.load();
//! crossbeam_utils::thread::scope(|scope| {
//! scope.spawn(|_| {
//! let _ = &guard;
//! });
//! }).unwrap();
//! ```
//!
//! ```rust
//! let shared = arc_swap::ArcSwap::from_pointee(42);
//! let guard = shared.load();
//! crossbeam_utils::thread::scope(|scope| {
//! scope.spawn(|_| {
//! let _ = &guard;
//! });
//! }).unwrap();
//! ```
//!
//! See that `ArcSwapAny<Rc>` really isn't Send.
//! ```rust
//! use std::sync::Arc;
//! use arc_swap::ArcSwapAny;
//!
//! let a: ArcSwapAny<Arc<usize>> = ArcSwapAny::new(Arc::new(42));
//! std::thread::spawn(move || drop(a)).join().unwrap();
//! ```
//!
//! ```rust,compile_fail
//! use std::rc::Rc;
//! use arc_swap::ArcSwapAny;
//!
//! let a: ArcSwapAny<Rc<usize>> = ArcSwapAny::new(Rc::new(42));
//! std::thread::spawn(move || drop(a));
//! ```

View file

@ -1,76 +0,0 @@
//! The fast slots for the primary strategy.
//!
//! They are faster, but fallible (in case the slots run out or if there's a collision with a
//! writer thread, this gives up and falls back to secondary strategy).
//!
//! They are based on hazard pointer ideas. To acquire one, the pointer is loaded, stored in the
//! slot and the debt is confirmed by loading it again and checking it is the same.
//!
//! # Orderings
//!
//! We ensure just one thing here. Since we do both the acquisition of the slot and the exchange of
//! the pointer in the writer with SeqCst, we are guaranteed to either see the change in case it
//! hits somewhere in between the two reads of the pointer, or to have successfully acquired it
//! before the change and before any cleanup of the old pointer happened (in which case we know the
//! writer will see our debt).
use core::cell::Cell;
use core::slice::Iter;
use core::sync::atomic::Ordering::*;
use super::Debt;
/// Number of fast debt slots in each thread's bunch.
const DEBT_SLOT_CNT: usize = 8;
/// Thread-local information for the [`Slots`].
///
/// Owned by the thread that owns the corresponding node; never shared.
#[derive(Default)]
pub(super) struct Local {
    // The next slot in round-robin rotation. Heuristically tries to balance the load across them
    // instead of having all of them stuffed towards the start of the array which gets
    // unsuccessfully iterated through every time.
    offset: Cell<usize>,
}
/// Bunch of fast debt slots.
///
/// A fixed-size array of [`Debt`]s; allocation of a debt fails once all
/// `DEBT_SLOT_CNT` slots are taken (see [`Slots::get_debt`]).
#[derive(Default)]
pub(super) struct Slots([Debt; DEBT_SLOT_CNT]);
impl Slots {
    /// Try to allocate one slot and get the pointer in it.
    ///
    /// Fails if there are no free slots.
    #[inline]
    pub(super) fn get_debt(&self, ptr: usize, local: &Local) -> Option<&Debt> {
        // Trick with offsets: we rotate through the slots (save the value from last time)
        // so successive leases are likely to succeed on the first attempt (or soon after)
        // instead of going through the list of already held ones.
        let offset = local.offset.get();
        let len = self.0.len();
        for i in 0..len {
            let i = (i + offset) % len;
            // Note: the indexing check is almost certainly optimised out because the len
            // is used above. And using .get_unchecked was actually *slower*.
            let slot = &self.0[i];
            if slot.0.load(Relaxed) == Debt::NONE {
                // We are allowed to split into the check and acquiring the debt. That's because we
                // are the only ones allowed to change NONE to something else. But we still need a
                // read-write operation with SeqCst on it :-(
                let old = slot.0.swap(ptr, SeqCst);
                debug_assert_eq!(Debt::NONE, old);
                // Remember where to start the search next time: right after this slot.
                local.offset.set(i + 1);
                return Some(&self.0[i]);
            }
        }
        None
    }
}
impl<'a> IntoIterator for &'a Slots {
type Item = &'a Debt;
type IntoIter = Iter<'a, Debt>;
fn into_iter(self) -> Self::IntoIter {
self.0.iter()
}
}

View file

@ -1,334 +0,0 @@
//! Slots and global/thread local data for the Helping strategy.
//!
//! This is inspired (but not an exact copy) of
//! <https://pvk.ca/Blog/2020/07/07/flatter-wait-free-hazard-pointers/>. The debts are mostly
//! copies of the ones used by the hybrid strategy, but modified a bit. Just like in the hybrid
//! strategy, in case the slots run out or when the writer updates the value, the debts are paid by
//! incrementing the ref count (which is a little slower, but still wait-free/lock-free and still
//! in order of nanoseconds).
//!
//! ## Reader, the fast path
//!
//! * Publish an active address — the address we'll be loading stuff from.
//! * Puts a generation into the control.
//! * Loads the pointer and puts it to the debt slot.
//! * Confirms by CaS-replacing the generation back to idle state.
//!
//! * Later, we pay it back by CaS-replacing it with the NO_DEPT (like any other slot).
//!
//! ## Writer, the non-colliding path
//!
//! * Replaces the pointer in the storage.
//! * The writer walks over all debts. It pays each debt that it is concerned with by bumping the
//!   reference and replacing the debt with NO_DEPT. The relevant reader will fail in the CaS
//! (either because it finds NO_DEPT or other pointer in there) and knows the reference was
//! bumped, so it needs to decrement it. Note that it is possible that someone also reuses the
//! slot for the _same_ pointer. In that case that reader will set it to NO_DEPT and the newer
//! reader will have a pre-paid debt, which is fine.
//!
//! ## The collision path
//!
//! The reservation of a slot is not atomic, therefore a writer can observe the reservation in
//! progress. But it doesn't want to wait for it to complete (it wants to be lock-free, which means
//! it needs to be able to resolve the situation on its own).
//!
//! The way it knows it is in progress of the reservation is by seeing a generation in there (it has
//! a distinct tag). In that case it'll try to:
//!
//! * First verify that the reservation is being done for the same address it modified, by reading
//! and re-confirming the active_addr slot corresponding to the currently handled node. If it is
//! for some other address, the writer doesn't have to be concerned and proceeds to the next slot.
//! * It does a full load. That is fine, because the writer must be on a different thread than the
//! reader and therefore there is at least one free slot. Full load means paying the debt right
//! away by incrementing the reference count.
//! * Then it tries to pass the already fully protected/paid pointer to the reader. It writes it to
//! an envelope and CaS-replaces it into the control, instead of the generation (if it fails,
//! someone has been faster and it rolls back). We need the envelope because the pointer itself
//! doesn't have to be aligned to 4 bytes and we need the space for tags to distinguish the types
//! of info in control; we can ensure the envelope is).
//! * The reader then finds the generation got replaced by a pointer to the envelope and uses that
//! pointer inside the envelope. It aborts its own debt. This effectively exchanges the envelopes
//! between the threads so each one has an envelope ready for future.
//!
//! ## ABA protection
//!
//! Using the generation to pre-reserve the slot allows the writer to make sure it is offering the
//! loaded pointer to the same reader and that the read value is new enough (and of the same type).
//!
//! This solves the general case, but there's also much less frequent but theoretical ABA problem
//! that could lead to UB, if left unsolved:
//!
//! * There is a collision on generation G.
//! * The writer loads a pointer, bumps it.
//! * In the meantime, all the 2^30 or 2^62 generations (depending on the usize width) generations
//! wrap around.
//! * The writer stores the outdated and possibly different-typed pointer in there and the reader
//! uses it.
//!
//! To mitigate that, every time the counter overflows we take the current node and un-assign it
//! from our current thread. We mark it as in "cooldown" and let it in there until there are no
//! writers messing with that node any more (if they are not on the node, they can't experience the
//! ABA problem on it). After that, we are allowed to use it again.
//!
//! This doesn't block the reader, it'll simply find *a* node next time this one, or possibly a
//! different (or new) one.
//!
//! # Orderings
//!
//! The linked lists/nodes are already provided for us. So we just need to make sure the debt
//! juggling is done right. We assume that the local node is ours to write to (others have only
//! limited right to write to certain fields under certain conditions) and that we are counted into
//! active writers while we dig through it on the writer end.
//!
//! We use SeqCst on a read-write operation both here at the very start of the sequence (storing
//! the generation into the control) and in the writer on the actual pointer. That establishes a
//! relation of what has happened first.
//!
//! After that we split the time into segments by read-write operations with AcqRel read-write
//! operations on the control. There's just one control in play for both threads so we don't need
//! SeqCst and the segments are understood by both the same way. The writer can sometimes use only
//! load-Acquire on that, because it needs to only read from data written by the reader. It'll
//! always see data from at least the segment before the observed control value and uses CaS to
//! send the results back, so it can't go into the past.
//!
//! There are two little gotchas:
//!
//! * When we read the address we should be loading from, we need to give up if the address does
//! not match (we can't simply load from there, because it can be dangling by that point and we
//! don't know its type, so we need to use our address for all loading and we just check they
//! match). If we give up, we don't do that CaS into control, therefore we could have given up on
//! newer address than the control we have read. For that reason, the address is also stored by
//! reader with Release and we read it with Acquire, which'll bring an up to date version of
//! control into our thread and we re-read that one to confirm the address is indeed between
//! two same values holding the generation, therefore corresponding to it.
//! * The destructor doesn't have a SeqCst in the writer, because there was no write in there.
//! That's OK. We need to ensure there are no new readers after the "change" we confirm in the
//! writer and that change is the destruction by that time, the destroying thread has exclusive
//! ownership and therefore there can be no new readers.
use core::cell::Cell;
use core::ptr;
use core::sync::atomic::Ordering::*;
use core::sync::atomic::{AtomicPtr, AtomicUsize};
use super::Debt;
use crate::RefCnt;
/// Tag on `control`: it holds a pointer to a `Handover` with a replacement value from a writer.
pub const REPLACEMENT_TAG: usize = 0b01;
/// Tag on `control`: it holds a reader's generation (a slot reservation in progress).
pub const GEN_TAG: usize = 0b10;
/// Mask selecting the two tag bits of a `control` value.
pub const TAG_MASK: usize = 0b11;
/// Nothing is going on in the slot right now (though a debt may still be active).
pub const IDLE: usize = 0;
/// Thread local data for the helping strategy.
#[derive(Default)]
pub(super) struct Local {
    // The generation counter.
    //
    // Incremented by 4 on every reservation so the two low bits stay free for
    // tags (see `Slots::get_debt`).
    generation: Cell<usize>,
}
// Make sure the pointers have 2 empty bits. Always.
//
// The 4-byte alignment guarantees the two low bits of any `Handover` address are
// zero, leaving room for the `TAG_MASK` tags when the address is stored in
// `control` (asserted in `Slots::help`).
#[derive(Default)]
#[repr(align(4))]
struct Handover(AtomicUsize);
/// The slots for the helping strategy.
pub(super) struct Slots {
    /// The control structure of the slot.
    ///
    /// Different threads signal what stage they are in in there. It can contain:
    ///
    /// * `IDLE` (nothing is happening, and there may or may not be an active debt).
    /// * a generation, tagged with GEN_TAG. The reader is trying to acquire a slot right now and a
    ///   writer might try to help out.
    /// * A replacement pointer, tagged with REPLACEMENT_TAG. This pointer points to a Handover,
    ///   containing an already protected value, provided by the writer for the benefit of the
    ///   reader. The reader should abort its own debt and use this instead. This indirection
    ///   (storing pointer to the envelope with the actual pointer) is to make sure there's a space
    ///   for the tag — there is no guarantee the real pointer is aligned to at least 4 bytes, we
    ///   can however force that for the Handover type.
    control: AtomicUsize,
    /// A possibly active debt.
    slot: Debt,
    /// If there's a generation in control, this signifies what address the reader is trying to
    /// load from.
    active_addr: AtomicUsize,
    /// A place where a writer can put a replacement value.
    ///
    /// Note that this is simply an allocation, and every participating slot contributes one, but
    /// they may be passed around through the lifetime of the program. It is not accessed directly,
    /// but through the space_offer thing.
    handover: Handover,
    /// A pointer to a handover envelope this node currently owns.
    ///
    /// A writer makes a switch of its and the reader's handover when successfully storing a
    /// replacement in the control.
    space_offer: AtomicPtr<Handover>,
}
impl Default for Slots {
    fn default() -> Self {
        Slots {
            control: AtomicUsize::new(IDLE),
            slot: Debt::default(),
            // Doesn't matter yet — it is stored before the control swap in get_debt,
            // and only read while a generation sits in control.
            active_addr: AtomicUsize::new(0),
            // Also doesn't matter
            handover: Handover::default(),
            // Here we would like it to point to our handover. But for that we need to be in place
            // in RAM (effectively pinned, though we use older Rust than Pin, possibly?), so not
            // yet. See init().
            space_offer: AtomicPtr::new(ptr::null_mut()),
        }
    }
}
impl Slots {
    /// Access to the one debt slot of this helping bunch.
    pub(super) fn slot(&self) -> &Debt {
        &self.slot
    }

    /// Starts a reservation of the slot for loading from the given address.
    ///
    /// Returns the (tagged) generation used for the reservation and a flag telling the caller the
    /// generation counter has wrapped around, so the node should be sent to a cooldown (the ABA
    /// protection described in the module docs).
    pub(super) fn get_debt(&self, ptr: usize, local: &Local) -> (usize, bool) {
        // Incrementing by 4 ensures we always have enough space for 2 bit of tags.
        let gen = local.generation.get().wrapping_add(4);
        debug_assert_eq!(gen & GEN_TAG, 0);
        local.generation.set(gen);
        // Signal the caller that the node should be sent to a cooldown.
        let discard = gen == 0;
        let gen = gen | GEN_TAG;
        // We will sync by the write to the control. But we also sync the value of the previous
        // generation/released slot. That way we may re-confirm in the writer that the reader is
        // not in between here and the compare_exchange below with a stale gen (eg. if we are in
        // here, the re-confirm there will load the Debt::NONE and we are fine).
        self.active_addr.store(ptr, SeqCst);
        // We are the only ones allowed to do the IDLE -> * transition and we never leave it in
        // anything else after a transaction, so this is OK. But we still need a load-store SeqCst
        // operation here to form a relation between this and the store of the actual pointer in
        // the writer thread :-(.
        let prev = self.control.swap(gen, SeqCst);
        debug_assert_eq!(IDLE, prev, "Left control in wrong state");
        (gen, discard)
    }

    /// The writer side: tries to help out the reader `who` if it is colliding with us on
    /// `storage_addr`.
    ///
    /// `replacement` produces a fully-protected (already ref-counted) copy of the currently
    /// stored value, which may be handed over to the reader through the handover envelope.
    pub(super) fn help<R, T>(&self, who: &Self, storage_addr: usize, replacement: &R)
    where
        T: RefCnt,
        R: Fn() -> T,
    {
        debug_assert_eq!(IDLE, self.control.load(Relaxed));
        // Also acquires the auxiliary data in other variables.
        let mut control = who.control.load(SeqCst);
        loop {
            match control & TAG_MASK {
                // Nothing to help with
                IDLE if control == IDLE => break,
                // Someone has already helped out with that, so we have nothing to do here
                REPLACEMENT_TAG => break,
                // Something is going on, let's have a better look.
                GEN_TAG => {
                    debug_assert!(
                        !ptr::eq(self, who),
                        "Refusing to help myself, makes no sense"
                    );
                    // Get the address that other thread is trying to load from. By that load,
                    // we also sync the control into our thread once more and reconfirm that the
                    // value of the active_addr is in between two same instances, therefore up to
                    // date to it.
                    let active_addr = who.active_addr.load(SeqCst);
                    if active_addr != storage_addr {
                        // Re-read the control for the same reason as on the top.
                        let new_control = who.control.load(SeqCst);
                        if new_control == control {
                            // The other thread is doing something, but to some other ArcSwap, so
                            // we don't care. Cool, done.
                            break;
                        } else {
                            // The control just changed under our hands, we don't know what to
                            // trust, so retry.
                            control = new_control;
                            continue;
                        }
                    }

                    // Now we know this work is for us. Try to create a replacement and offer it.
                    // This actually does a full-featured load under the hood, but we are currently
                    // idle and the load doesn't re-enter write, so that's all fine.
                    let replacement = replacement();
                    let replace_addr = T::as_ptr(&replacement) as usize;
                    // If we succeed in helping the other thread, we take their empty space in
                    // return for us that we pass to them. It's already there, the value is synced
                    // to us by the load of control above.
                    let their_space = who.space_offer.load(SeqCst);
                    // Our own thread; nobody but us writes in here.
                    let my_space = self.space_offer.load(SeqCst);
                    // Synced to the reader by the compare-exchange below. If that fails, the
                    // value won't ever be read anyway.
                    unsafe {
                        (*my_space).0.store(replace_addr, SeqCst);
                    }
                    // Ensured by the align annotation at the type.
                    assert_eq!(my_space as usize & TAG_MASK, 0);
                    let space_addr = (my_space as usize) | REPLACEMENT_TAG;
                    // On failure -> same reason as at the top, reading the value.
                    // On success -> we send data to that thread through here. Also, load to get
                    // their space (it won't have changed; it changes only when the control is set
                    // to IDLE).
                    match who
                        .control
                        .compare_exchange(control, space_addr, SeqCst, SeqCst)
                    {
                        Ok(_) => {
                            // We have successfully sent our replacement out and got
                            // their space in return (by that load above).
                            self.space_offer.store(their_space, SeqCst);
                            // The ref count went with it, so forget about it here.
                            T::into_ptr(replacement);
                            // We have successfully helped out, so we are done.
                            break;
                        }
                        Err(new_control) => {
                            // Something has changed in between. Let's try again, nothing changed
                            // (the replacement will get dropped at the end of scope, we didn't do
                            // anything with the spaces, etc.
                            control = new_control;
                        }
                    }
                }
                _ => unreachable!("Invalid control value {:X}", control),
            }
        }
    }

    /// Wires `space_offer` up to our own `handover`.
    ///
    /// Must be called once the `Slots` sits at its final address in memory (see the comment in
    /// `default()`).
    pub(super) fn init(&mut self) {
        *self.space_offer.get_mut() = &mut self.handover;
    }

    /// Confirms the reservation started by `get_debt`.
    ///
    /// Returns `Ok(())` if our debt is in place, or `Err(replacement)` if a writer helped us out
    /// in the meantime — then the caller should use the (already protected) replacement address
    /// and pay the leftover debt back without taking advantage of it.
    pub(super) fn confirm(&self, gen: usize, ptr: usize) -> Result<(), usize> {
        // Put the slot there and consider it acquire of a "lock". For that we need swap, not store
        // only (we need Acquire and Acquire works only on loads). Release is to make sure control
        // is observable by the other thread (but that's probably not necessary anyway?)
        let prev = self.slot.0.swap(ptr, SeqCst);
        debug_assert_eq!(Debt::NONE, prev);

        // Confirm by writing to the control (or discover that we got helped). We stop anyone else
        // from helping by setting it to IDLE.
        let control = self.control.swap(IDLE, SeqCst);
        if control == gen {
            // Nobody interfered, we have our debt in place and can proceed.
            Ok(())
        } else {
            // Someone put a replacement in there.
            debug_assert_eq!(control & TAG_MASK, REPLACEMENT_TAG);
            let handover = (control & !TAG_MASK) as *mut Handover;
            let replacement = unsafe { &*handover }.0.load(SeqCst);
            // Make sure we advertise the right envelope when we set it to generation next time.
            self.space_offer.store(handover, SeqCst);
            // Note we've left the debt in place. The caller should pay it back (without ever
            // taking advantage of it) to make sure any extra is actually dropped (it is possible
            // someone provided the replacement *and* paid the debt and we need just one of them).
            Err(replacement)
        }
    }
}

View file

@ -1,371 +0,0 @@
//! A linked list of debt nodes.
//!
//! A node may or may not be owned by a thread. Reader debts are allocated in its owned node,
//! writer walks everything (but may also use some owned values).
//!
//! The list is prepend-only — if a thread dies, the node lives on (and can be claimed by another
//! thread later on). This makes the implementation much simpler, since everything here is
//! `'static` and we don't have to care about knowing when to free stuff.
//!
//! The nodes contain both the fast primary slots and a secondary fallback ones.
//!
//! # Synchronization
//!
//! We synchronize several things here.
//!
//! The addition of nodes is synchronized through the head (Load on each read, AcqRel on each
//! attempt to add another node). Note that certain parts never change after that (they aren't even
//! atomic) and other things that do change take care of themselves (the debt slots have their own
//! synchronization, etc).
//!
//! The ownership is acquire-release lock pattern.
//!
//! Similar, the counting of active writers is an acquire-release lock pattern.
//!
//! We also do release-acquire "send" from the start-cooldown to check-cooldown to make sure we see
//! at least as up to date value of the writers as when the cooldown started. That way, if we see 0,
//! we know it must have happened since then.
use core::cell::Cell;
use core::ptr;
use core::slice::Iter;
use core::sync::atomic::Ordering::*;
use core::sync::atomic::{AtomicPtr, AtomicUsize};
#[cfg(feature = "experimental-thread-local")]
use core::cell::OnceCell;
use alloc::boxed::Box;
use super::fast::{Local as FastLocal, Slots as FastSlots};
use super::helping::{Local as HelpingLocal, Slots as HelpingSlots};
use super::Debt;
use crate::RefCnt;
/// An unclaimed node, free for any thread to take over.
const NODE_UNUSED: usize = 0;
/// A node currently owned by some thread, which allocates debts in it.
const NODE_USED: usize = 1;
/// A node resting until no writers are active on it any more — the ABA protection
/// (see `Node::check_cooldown` and the helping module docs).
const NODE_COOLDOWN: usize = 2;
/// The head of the debt linked list.
static LIST_HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());
/// A guard marking that a writer is currently active on the node.
///
/// Dropping it decrements the node's `active_writers` count (see `Node::reserve_writer`).
pub struct NodeReservation<'a>(&'a Node);
impl Drop for NodeReservation<'_> {
    fn drop(&mut self) {
        // Release: publishes everything this writer did before the count can be
        // observed as 0 (paired with the Acquire in `reserve_writer`, per the
        // acquire-release lock pattern described in the module docs).
        self.0.active_writers.fetch_sub(1, Release);
    }
}
/// One thread-local node for debts.
///
/// Aligned to 64 bytes — a common cache-line size, presumably to keep nodes of
/// different threads from sharing a line (TODO confirm intent).
#[repr(C, align(64))]
pub(crate) struct Node {
    /// The fast (but fallible) debt slots.
    fast: FastSlots,
    /// The slots for the helping strategy.
    helping: HelpingSlots,
    /// Claim state of the node: NODE_UNUSED / NODE_USED / NODE_COOLDOWN.
    in_use: AtomicUsize,
    // Next node in the list.
    //
    // It is a pointer because we touch it before synchronization (we don't _dereference_ it before
    // synchronization, only manipulate the pointer itself). That is illegal according to strict
    // interpretation of the rules by MIRI on references.
    next: *const Node,
    /// How many writers are currently digging through this node.
    active_writers: AtomicUsize,
}
impl Default for Node {
    /// A fresh node starts out claimed (`NODE_USED`) by whoever created it, with
    /// empty slot sets, no successor in the list and no writers touching it.
    fn default() -> Self {
        Self {
            fast: FastSlots::default(),
            helping: HelpingSlots::default(),
            active_writers: AtomicUsize::new(0),
            in_use: AtomicUsize::new(NODE_USED),
            next: ptr::null(),
        }
    }
}
impl Node {
    /// Goes through the debt linked list.
    ///
    /// This traverses the linked list, calling the closure on each node. If the closure returns
    /// `Some`, it terminates with that value early, otherwise it runs to the end.
    pub(crate) fn traverse<R, F: FnMut(&'static Node) -> Option<R>>(mut f: F) -> Option<R> {
        // Acquire we want to make sure we read the correct version of data at the end of the
        // pointer. Any write to the DEBT_HEAD is with Release.
        //
        // Furthermore, we need to see the newest version of the list in case we examine the debts
        // - if a new one is added recently, we don't want a stale read -> SeqCst.
        //
        // Note that the other pointers in the chain never change and are *ordinary* pointers. The
        // whole linked list is synchronized through the head.
        let mut current = unsafe { LIST_HEAD.load(SeqCst).as_ref() };
        while let Some(node) = current {
            let result = f(node);
            if result.is_some() {
                return result;
            }
            current = unsafe { node.next.as_ref() };
        }
        None
    }

    /// Put the current thread node into cooldown
    fn start_cooldown(&self) {
        // Trick: Make sure we have an up to date value of the active_writers in this thread, so we
        // can properly release it below.
        let _reservation = self.reserve_writer();
        assert_eq!(NODE_USED, self.in_use.swap(NODE_COOLDOWN, Release));
    }

    /// Perform a cooldown if the node is ready.
    ///
    /// See the ABA protection at the [helping].
    fn check_cooldown(&self) {
        // Check if the node is in cooldown, for two reasons:
        // * Skip most of nodes fast, without dealing with them.
        // * More importantly, sync the value of active_writers to be at least the value when the
        //   cooldown started. That way we know the 0 we observe happened some time after
        //   start_cooldown.
        if self.in_use.load(Acquire) == NODE_COOLDOWN {
            // The rest can be nicely relaxed — no memory is being synchronized by these
            // operations. We just see an up to date 0 and allow someone (possibly us) to claim the
            // node later on.
            if self.active_writers.load(Relaxed) == 0 {
                let _ = self
                    .in_use
                    .compare_exchange(NODE_COOLDOWN, NODE_UNUSED, Relaxed, Relaxed);
            }
        }
    }

    /// Mark this node that a writer is currently playing with it.
    ///
    /// Returns a guard; dropping it releases the reservation again.
    pub fn reserve_writer(&self) -> NodeReservation {
        self.active_writers.fetch_add(1, Acquire);
        NodeReservation(self)
    }

    /// "Allocate" a node.
    ///
    /// Either a new one is created, or previous one is reused. The node is claimed to become
    /// in_use.
    fn get() -> &'static Self {
        // Try to find an unused one in the chain and reuse it.
        Self::traverse(|node| {
            node.check_cooldown();
            if node
                .in_use
                // We claim a unique control over the generation and the right to write to slots if
                // they are Debt::NONE
                .compare_exchange(NODE_UNUSED, NODE_USED, SeqCst, Relaxed)
                .is_ok()
            {
                Some(node)
            } else {
                None
            }
        })
        // If that didn't work, create a new one and prepend to the list.
        .unwrap_or_else(|| {
            // Leaked on purpose: nodes live for the rest of the program (see module docs).
            let node = Box::leak(Box::<Node>::default());
            // Wire up the helping slots' self-pointer now that the node has its final address.
            node.helping.init();
            // We don't want to read any data in addition to the head, Relaxed is fine
            // here.
            //
            // We do need to release the data to others, but for that, we acquire in the
            // compare_exchange below.
            let mut head = LIST_HEAD.load(Relaxed);
            loop {
                node.next = head;
                if let Err(old) = LIST_HEAD.compare_exchange_weak(
                    head, node,
                    // We need to release *the whole chain* here. For that, we need to
                    // acquire it first.
                    //
                    // SeqCst because we need to make sure it is properly set "before" we do
                    // anything to the debts.
                    SeqCst, Relaxed, // Nothing changed, go next round of the loop.
                ) {
                    head = old;
                } else {
                    return node;
                }
            }
        })
    }

    /// Iterate over the fast slots.
    pub(crate) fn fast_slots(&self) -> Iter<Debt> {
        self.fast.into_iter()
    }

    /// Access the helping slot.
    pub(crate) fn helping_slot(&self) -> &Debt {
        self.helping.slot()
    }
}
/// A wrapper around a node pointer, to un-claim the node on thread shutdown.
///
/// See the `Drop` implementation: the owned node is sent into cooldown when the
/// thread-local is destroyed.
pub(crate) struct LocalNode {
    /// Node for this thread, if any.
    ///
    /// We don't necessarily have to own one, but if we don't, we'll get one before the first use.
    node: Cell<Option<&'static Node>>,
    /// Thread-local data for the fast slots.
    fast: FastLocal,
    /// Thread local data for the helping strategy.
    helping: HelpingLocal,
}
impl LocalNode {
    /// Runs `f` with this thread's `LocalNode`, claiming a debt node first if needed.
    #[cfg(not(feature = "experimental-thread-local"))]
    pub(crate) fn with<R, F: FnOnce(&LocalNode) -> R>(f: F) -> R {
        // The closure is moved into the TLS access below; the Cell lets us take it
        // back out exactly once on whichever path runs.
        let f = Cell::new(Some(f));
        THREAD_HEAD
            .try_with(|head| {
                if head.node.get().is_none() {
                    head.node.set(Some(Node::get()));
                }
                let f = f.take().unwrap();
                f(head)
            })
            // During the application shutdown, the thread local storage may be already
            // deallocated. In that case, the above fails but we still need something. So we just
            // find or allocate a node and use it just once.
            //
            // Note that the situation should be very very rare and not happen often, so the slower
            // performance doesn't matter that much.
            .unwrap_or_else(|_| {
                let tmp_node = LocalNode {
                    node: Cell::new(Some(Node::get())),
                    fast: FastLocal::default(),
                    helping: HelpingLocal::default(),
                };
                let f = f.take().unwrap();
                f(&tmp_node)
                // Drop of tmp_node -> sends the node we just used into cooldown.
            })
    }

    /// Runs `f` with this thread's `LocalNode` (`#[thread_local]` variant).
    #[cfg(feature = "experimental-thread-local")]
    pub(crate) fn with<R, F: FnOnce(&LocalNode) -> R>(f: F) -> R {
        let thread_head = THREAD_HEAD.get_or_init(|| LocalNode {
            node: Cell::new(None),
            fast: FastLocal::default(),
            helping: HelpingLocal::default(),
        });
        if thread_head.node.get().is_none() {
            thread_head.node.set(Some(Node::get()));
        }
        f(&thread_head)
    }

    /// Creates a new debt.
    ///
    /// This stores the debt of the given pointer (untyped, casted into an usize) and returns a
    /// reference to that slot, or gives up with `None` if all the slots are currently full.
    #[inline]
    pub(crate) fn new_fast(&self, ptr: usize) -> Option<&'static Debt> {
        let node = &self.node.get().expect("LocalNode::with ensures it is set");
        debug_assert_eq!(node.in_use.load(Relaxed), NODE_USED);
        node.fast.get_debt(ptr, &self.fast)
    }

    /// Initializes a helping slot transaction.
    ///
    /// Returns the generation (with tag).
    pub(crate) fn new_helping(&self, ptr: usize) -> usize {
        let node = &self.node.get().expect("LocalNode::with ensures it is set");
        debug_assert_eq!(node.in_use.load(Relaxed), NODE_USED);
        let (gen, discard) = node.helping.get_debt(ptr, &self.helping);
        if discard {
            // Too many generations happened, make sure the writers give the poor node a break for
            // a while so they don't observe the generation wrapping around.
            node.start_cooldown();
            // Un-assign the node from this thread; a fresh one is claimed on next use.
            self.node.take();
        }
        gen
    }

    /// Confirm the helping transaction.
    ///
    /// The generation comes from previous new_helping.
    ///
    /// Will either return a debt with the pointer, or a debt to pay and a replacement (already
    /// protected) address.
    pub(crate) fn confirm_helping(
        &self,
        gen: usize,
        ptr: usize,
    ) -> Result<&'static Debt, (&'static Debt, usize)> {
        let node = &self.node.get().expect("LocalNode::with ensures it is set");
        debug_assert_eq!(node.in_use.load(Relaxed), NODE_USED);
        let slot = node.helping_slot();
        node.helping
            .confirm(gen, ptr)
            .map(|()| slot)
            .map_err(|repl| (slot, repl))
    }

    /// The writer side of a helping slot.
    ///
    /// This potentially helps the `who` node (uses self as the local node, which must be
    /// different) by loading the address that one is trying to load.
    pub(super) fn help<R, T>(&self, who: &Node, storage_addr: usize, replacement: &R)
    where
        T: RefCnt,
        R: Fn() -> T,
    {
        let node = &self.node.get().expect("LocalNode::with ensures it is set");
        debug_assert_eq!(node.in_use.load(Relaxed), NODE_USED);
        node.helping.help(&who.helping, storage_addr, replacement)
    }
}
impl Drop for LocalNode {
    fn drop(&mut self) {
        // Hand any claimed node back. The Release inside start_cooldown syncs the
        // writes/ownership of this Node to whichever thread claims it next.
        match self.node.get() {
            Some(node) => node.start_cooldown(),
            None => (),
        }
    }
}
#[cfg(not(feature = "experimental-thread-local"))]
thread_local! {
    /// A debt node assigned to this thread.
    ///
    /// Starts with no `Node` claimed; `LocalNode::with` claims one lazily and the
    /// `Drop` impl releases it (into cooldown) when the thread terminates.
    static THREAD_HEAD: LocalNode = LocalNode {
        node: Cell::new(None),
        fast: FastLocal::default(),
        helping: HelpingLocal::default(),
    };
}
// `no_std` variant: needs nightly's `#[thread_local]`; the `OnceCell` is initialized
// lazily by the corresponding `LocalNode::with`.
#[cfg(feature = "experimental-thread-local")]
#[thread_local]
/// A debt node assigned to this thread.
static THREAD_HEAD: OnceCell<LocalNode> = OnceCell::new();
#[cfg(test)]
mod tests {
    use super::*;
    impl Node {
        /// True iff no slot (fast or helping) of this node currently holds a debt.
        fn is_empty(&self) -> bool {
            let mut slots = self
                .fast_slots()
                .chain(core::iter::once(self.helping_slot()));
            slots.all(|slot| slot.0.load(Relaxed) == Debt::NONE)
        }
        /// The node currently claimed by this thread.
        fn get_thread() -> &'static Self {
            LocalNode::with(|local| local.node.get().unwrap())
        }
    }
    /// A freshly acquired thread local node is empty.
    #[test]
    fn new_empty() {
        assert!(Node::get_thread().is_empty());
    }
}

View file

@ -1,137 +0,0 @@
//! Debt handling.
//!
//! A debt is a reference count of a smart pointer that is owed. This module provides a lock-free
//! storage for debts.
//!
//! Each thread has its own node with a bunch of slots. Only that thread can allocate debts in there,
//! but others are allowed to inspect and pay them. The nodes form a linked list for the reason of
//! inspection. The nodes are never removed (even after the thread terminates), but if the thread
//! gives it up, another (new) thread can claim it.
//!
//! The writers walk the whole chain and pay the debts (by bumping the ref counts) of the just
//! removed pointer.
//!
//! Each node has some fast (but fallible) slots and a fallback slot, with different algorithms to
//! claim them (see the relevant submodules).
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering::*;
pub(crate) use self::list::{LocalNode, Node};
use super::RefCnt;
mod fast;
mod helping;
mod list;
/// One debt slot.
///
/// It may contain an „owed“ reference count.
///
/// The slot is a plain `AtomicUsize` holding either `Debt::NONE` (empty) or the address of the
/// pointer whose reference count is owed.
#[derive(Debug)]
pub(crate) struct Debt(pub(crate) AtomicUsize);
impl Debt {
    /// Sentinel for an empty slot.
    ///
    /// The value of pointer `3` should be pretty safe, for two reasons:
    ///
    /// * It's an odd number, but the pointers we have are likely aligned at least to the word size,
    ///   because the data at the end of the `Arc` has the counters.
    /// * It's in the very first page where NULL lives, so it's not mapped.
    pub(crate) const NONE: usize = 0b11;
}
impl Default for Debt {
fn default() -> Self {
Debt(AtomicUsize::new(Self::NONE))
}
}
impl Debt {
    /// Tries to pay the given debt.
    ///
    /// If the debt is still there, for the given pointer, it is paid and `true` is returned. If it
    /// is empty or if there's some other pointer, it is not paid and `false` is returned, meaning
    /// the debt was paid previously by someone else.
    ///
    /// # Notes
    ///
    /// * It is possible that someone paid the debt and then someone else put a debt for the same
    ///   pointer in there. This is fine, as we'll just pay the debt for that someone else.
    /// * This relies on the fact that the same pointer must point to the same object and
    ///   specifically to the same type — the caller provides the type, its destructor, etc.
    /// * It also relies on the fact the same thing is not stuffed both inside an `Arc` and `Rc` or
    ///   something like that, but that sounds like a reasonable assumption. Someone storing it
    ///   through `ArcSwap<T>` and someone else with `ArcSwapOption<T>` will work.
    #[inline]
    pub(crate) fn pay<T: RefCnt>(&self, ptr: *const T::Base) -> bool {
        self.0
            // If we don't change anything because there's something else, Relaxed is fine.
            //
            // The Release works as kind of Mutex. We make sure nothing from the debt-protected
            // sections leaks below this point.
            //
            // Note that if it got paid already, it is inside the reference count. We don't
            // necessarily observe that increment, but whoever destroys the pointer *must* see the
            // up to date value, with all increments already counted in (the Arc takes care of that
            // part).
            .compare_exchange(ptr as usize, Self::NONE, Release, Relaxed)
            .is_ok()
    }
    /// Pays all the debts on the given pointer and the storage.
    ///
    /// Walks the global node list and, for every slot that owes a reference for `ptr`, bumps the
    /// reference count and marks the slot paid. Also helps along any reader that published an
    /// intent to load from `storage_addr` (see `LocalNode::help`); `replacement` produces the
    /// protected value handed to such a reader.
    pub(crate) fn pay_all<T, R>(ptr: *const T::Base, storage_addr: usize, replacement: R)
    where
        T: RefCnt,
        R: Fn() -> T,
    {
        LocalNode::with(|local| {
            let val = unsafe { T::from_ptr(ptr) };
            // Pre-pay one ref count that can be safely put into a debt slot to pay it.
            T::inc(&val);
            Node::traverse::<(), _>(|node| {
                // Make the cooldown trick know we are poking into this node.
                let _reservation = node.reserve_writer();
                local.help(node, storage_addr, &replacement);
                let all_slots = node
                    .fast_slots()
                    .chain(core::iter::once(node.helping_slot()));
                for slot in all_slots {
                    // Note: Release is enough even here. That makes sure the increment is
                    // visible to whoever might acquire on this slot and can't leak below this.
                    // And we are the ones doing decrements anyway.
                    if slot.pay::<T>(ptr) {
                        // Pre-pay one more, for another future slot
                        T::inc(&val);
                    }
                }
                // Returning None keeps the traversal going over the whole list.
                None
            });
            // Implicit dec by dropping val in here, pair for the above
        })
    }
}
#[cfg(test)]
mod tests {
    use alloc::sync::Arc;
    /// Checks the assumption that arcs to ZSTs have different pointer values.
    ///
    /// The debt machinery relies on pointer identity implying object identity; this holds for
    /// `Arc` even around zero-sized payloads because the counters share the allocation.
    #[test]
    fn arc_zst() {
        struct A;
        struct B;
        let first = Arc::new(A);
        let second = Arc::new(B);
        let first_ref: &A = &first;
        let second_ref: &B = &second;
        let first_addr = first_ref as *const _ as usize;
        let second_addr = second_ref as *const _ as usize;
        assert_ne!(first_addr, second_addr);
    }
}

View file

@ -1,106 +0,0 @@
//! Internal details.
//!
//! While the other parts of documentation are useful to users of the crate, this part is probably
//! helpful only if you want to look into the code or are curious about how it works internally.
//!
//! Also note that any of these details may change in future versions and are not part of the
//! stability guarantees. Don't rely on anything here.
//!
//! # Storing the [`Arc`].
//!
//! The [`Arc`] can be turned into a raw pointer and back. This is abstracted by the [`RefCnt`]
//! trait and it is technically possible to implement it for custom types (this crate also
//! implements it for [`Rc`] and [`Weak`], though the actual usefulness of these is a bit
//! questionable).
//!
//! The raw pointer is stored inside an [`AtomicPtr`].
//!
//! # Protection of reference counts
//!
//! The first idea would be to just use [`AtomicPtr`] with whatever the [`Arc::into_raw`] returns.
//! Then replacing it would be fine (there's no need to update ref counts). The load needs to
//! increment the reference count — one still stays inside and another is returned to the caller.
//! This is done by re-creating the Arc from the raw pointer and then cloning it, throwing one
//! instance away (without destroying it).
//!
//! This approach has a problem. There's a short time between we read the raw pointer and increment
//! the count. If some other thread replaces the stored Arc and throws it away, the ref count could
//! drop to 0, get destroyed and we would be trying to bump ref counts in a ghost, which would be
//! totally broken.
//!
//! To prevent this, we actually use two approaches in a hybrid manner.
//!
//! The first one is based on hazard pointers idea, but slightly modified. There's a global
//! repository of pointers that owe a reference. When someone swaps a pointer, it walks this list
//! and pays all the debts (and takes them out of the repository).
//!
//! For simplicity and performance, storing into the repository is fallible. If storing into the
//! repository fails (because the thread used up all its own slots, or because the pointer got
//! replaced in just the wrong moment and it can't confirm the reservation), unlike the full
//! hazard-pointers approach, we don't retry, but fall back onto secondary strategy.
//!
//! The secondary strategy is similar, but a bit more complex (and therefore slower, that's why it
//! is only a fallback). We first publish an intent to read a pointer (and where we are reading it
//! from). Then we actually do so and publish the debt, like previously.
//!
//! The writer pays the debts as usual. But also, if it sees the intent to read the value, it helps
//! along, reads it, bumps the reference and passes it to the reader. Therefore, if the reader
//! fails to do the protection itself, because it got interrupted by a writer, it finds a
//! ready-made replacement value it can just use and doesn't have to retry. Also, the writer
//! doesn't have to wait for the reader in any way, because it can just solve its problem and move
//! on.
//!
//! # Unsafety
//!
//! All the uses of the unsafe keyword is just to turn the raw pointer back to Arc. It originated
//! from an Arc in the first place, so the only thing to ensure is it is still valid. That means its
//! ref count never dropped to 0.
//!
//! At the beginning, there's ref count of 1 stored in the raw pointer (and maybe some others
//! elsewhere, but we can't rely on these). This 1 stays there for the whole time the pointer is
//! stored there. When the arc is replaced, this 1 is returned to the caller, so we just have to
//! make sure no more readers access it by that time.
//!
//! # Leases and debts
//!
//! Instead of incrementing the reference count, the pointer reference can be owed. In such case, it
//! is recorded into a global storage. As each thread has its own storage (the global storage is
//! composed of multiple thread storages), the readers don't contend. When the pointer is no longer
//! in use, the debt is erased.
//!
//! The writer pays all the existing debts, therefore the readers have the full Arc with ref count at
//! that time. The reader is made aware the debt was paid and decrements the reference count.
//!
//! # Memory orders
//!
//! ## Synchronizing the data pointed to by the pointer.
//!
//! We have AcqRel (well, SeqCst, but that's included) on the swap and Acquire on the loads. In case
//! of the double read around the debt allocation, we do that on the *second*, because of ABA.
//! That's also why that SeqCst on the allocation of debt itself is not enough.
//! The destruction of the old value can only happen after the *latest* decrement of its reference
//! count. By making both the increment and decrement AcqRel, we effectively chain
//! the edges together.
//!
//! # Memory orders around debts
//!
//! The linked list of debt nodes only grows. The shape of the list (existence of nodes) is
//! synchronized through Release on creation and Acquire on load on the head pointer.
//!
//! The debts work similarly to locks — Acquire and Release make all the pointer manipulation at the
//! interval where it is written down. However, we use the SeqCst on the allocation of the debt
//! because when we see an empty slot, we need to make sure that it happened after we have
//! overwritten the pointer.
//!
//! In case the writer pays the debt, it sees the new enough data (for the same reasons the stale
//! empties are not seen). The reference count on the Arc is AcqRel and makes sure it is not
//! destroyed too soon. The writer traverses all the slots, therefore they don't need to synchronize
//! with each other.
//!
//! Further details are inside the internal `debt` module.
//!
//! [`RefCnt`]: crate::RefCnt
//! [`Arc`]: std::sync::Arc
//! [`Arc::into_raw`]: std::sync::Arc::into_raw
//! [`Rc`]: std::rc::Rc
//! [`Weak`]: std::sync::Weak
//! [`AtomicPtr`]: std::sync::atomic::AtomicPtr

View file

@ -1,53 +0,0 @@
//! Limitations and common pitfalls.
//!
//! # Sized types
//!
//! This currently works only for `Sized` types. Unsized types have „fat pointers“, which are twice
//! as large as the normal ones. The [`AtomicPtr`] doesn't support them. One could use something
//! like `AtomicU128` for them. The catch is this doesn't exist and the difference would make it
//! really hard to implement the debt storage/stripped down hazard pointers.
//!
//! A workaround is to use double indirection:
//!
//! ```rust
//! # use arc_swap::ArcSwap;
//! // This doesn't work:
//! // let data: ArcSwap<[u8]> = ArcSwap::new(Arc::from([1, 2, 3]));
//!
//! // But this does:
//! let data: ArcSwap<Box<[u8]>> = ArcSwap::from_pointee(Box::new([1, 2, 3]));
//! # drop(data);
//! ```
//!
//! It also may be possible to use `ArcSwap` with the [`triomphe::ThinArc`] (that crate needs
//! enabling a feature flag to cooperate with `ArcSwap`).
//!
//! # Too many [`Guard`]s
//!
//! There's only limited number of "fast" slots for borrowing from [`ArcSwap`] for each single
//! thread (currently 8, but this might change in future versions). If these run out, the algorithm
//! falls back to slower path.
//!
//! If too many [`Guard`]s are kept around, the performance might be poor. These are not intended
//! to be stored in data structures or used across async yield points.
//!
//! [`ArcSwap`]: crate::ArcSwap
//! [`Guard`]: crate::Guard
//! [`AtomicPtr`]: std::sync::atomic::AtomicPtr
//!
//! # No `Clone` implementation
//!
//! Previous version implemented [`Clone`], but it turned out to be very confusing to people, since
//! it created fully independent [`ArcSwap`]. Users expected the instances to be tied to each
//! other, that store in one would change the result of future load of the other.
//!
//! To emulate the original behaviour, one can do something like this:
//!
//! ```rust
//! # use arc_swap::ArcSwap;
//! # let old = ArcSwap::from_pointee(42);
//! let new = ArcSwap::new(old.load_full());
//! # let _ = new;
//! ```
//!
//! [`triomphe::ThinArc`]: https://docs.rs/triomphe/latest/triomphe/struct.ThinArc.html

View file

@ -1,54 +0,0 @@
//! Additional documentation.
//!
//! Here we have some more general topics that might be good to know that just don't fit to the
//! crate level intro.
//!
//! Also, there were some previous blog posts about the crate which you might find interesting.
//!
//! # Atomic orderings
//!
//! Each operation on the [`ArcSwapAny`] with [`DefaultStrategy`] type callable concurrently (eg.
//! [`load`], but not [`into_inner`]) contains at least one [`SeqCst`] atomic read-write operation,
//! therefore even operations on different instances have a defined global order of operations.
//!
//! # Features
//!
//! The `weak` feature adds the ability to use arc-swap with the [`Weak`] pointer too,
//! through the [`ArcSwapWeak`] type. The needed std support is stabilized in rust version 1.45 (as
//! of now in beta).
//!
//! The `experimental-strategies` enables few more strategies that can be used. Note that these
//! **are not** part of the API stability guarantees and they may be changed, renamed or removed at
//! any time.
//!
//! The `experimental-thread-local` feature can be used to build arc-swap for `no_std` targets, by
//! replacing occurrences of [`std::thread_local!`] with the `#[thread_local]` directive. This
//! requires a nightly Rust compiler as it makes use of the experimental
//! [`thread_local`](https://doc.rust-lang.org/unstable-book/language-features/thread-local.html)
//! feature. Using this feature, thread-local variables are compiled using LLVM built-ins, which
//! have [several underlying modes of
//! operation](https://doc.rust-lang.org/beta/unstable-book/compiler-flags/tls-model.html). To add
//! support for thread-local variables on a platform that does not have OS or linker support, the
//! easiest way is to use `-Ztls-model=emulated` and to implement `__emutls_get_address` by hand,
//! as in [this
//! example](https://opensource.apple.com/source/clang/clang-800.0.38/src/projects/compiler-rt/lib/builtins/emutls.c.auto.html)
//! from Clang.
//!
//! # Minimal compiler version
//!
//! The `1` versions will compile on all compilers supporting the 2018 edition. Note that this
//! applies only if no additional feature flags are enabled and does not apply to compiling or
//! running tests.
//!
//! [`ArcSwapAny`]: crate::ArcSwapAny
//! [`ArcSwapWeak`]: crate::ArcSwapWeak
//! [`load`]: crate::ArcSwapAny::load
//! [`into_inner`]: crate::ArcSwapAny::into_inner
//! [`DefaultStrategy`]: crate::DefaultStrategy
//! [`SeqCst`]: std::sync::atomic::Ordering::SeqCst
//! [`Weak`]: std::sync::Weak
pub mod internal;
pub mod limitations;
pub mod patterns;
pub mod performance;

View file

@ -1,271 +0,0 @@
//! Common use patterns
//!
//! Here are some common patterns one can use for inspiration. These are mostly covered by examples
//! at the right type in the crate, but this lists them at a single place.
//!
//! # Sharing of configuration data
//!
//! We want to share configuration from some source with rare updates to some high performance
//! worker threads. It can be configuration in its true sense, or a routing table.
//!
//! The idea here is, each new version is a newly allocated in its own [`Arc`]. It is then stored
//! into a *shared* `ArcSwap` instance.
//!
//! Each worker then loads the current version before each work chunk. In case a new version is
//! stored, the worker keeps using the loaded one until it ends the work chunk and, if it's the
//! last one to have the version, deallocates it automatically by dropping the [`Guard`]
//!
//! Note that the configuration needs to be passed through a *single shared* [`ArcSwap`]. That
//! means we need to share that instance and we do so through an [`Arc`] (one could use a global
//! variable instead).
//!
//! Therefore, what we have is `Arc<ArcSwap<Config>>`.
//!
//! ```rust
//! # use std::sync::Arc;
//! # use std::sync::atomic::{AtomicBool, Ordering};
//! # use std::thread;
//! # use std::time::Duration;
//! #
//! # use arc_swap::ArcSwap;
//! # struct Work;
//! # impl Work { fn fetch() -> Self { Work } fn perform(&self, _: &Config) {} }
//! #
//! #[derive(Debug, Default)]
//! struct Config {
//! // ... Stuff in here ...
//! }
//!
//! // We wrap the ArcSwap into an Arc, so we can share it between threads.
//! let config = Arc::new(ArcSwap::from_pointee(Config::default()));
//!
//! let terminate = Arc::new(AtomicBool::new(false));
//! let mut threads = Vec::new();
//!
//! // The configuration thread
//! threads.push(thread::spawn({
//! let config = Arc::clone(&config);
//! let terminate = Arc::clone(&terminate);
//! move || {
//! while !terminate.load(Ordering::Relaxed) {
//! thread::sleep(Duration::from_secs(6));
//! // Actually, load it from somewhere
//! let new_config = Arc::new(Config::default());
//! config.store(new_config);
//! }
//! }
//! }));
//!
//! // The worker thread
//! for _ in 0..10 {
//! threads.push(thread::spawn({
//! let config = Arc::clone(&config);
//! let terminate = Arc::clone(&terminate);
//! move || {
//! while !terminate.load(Ordering::Relaxed) {
//! let work = Work::fetch();
//! let config = config.load();
//! work.perform(&config);
//! }
//! }
//! }));
//! }
//!
//! // Terminate gracefully
//! terminate.store(true, Ordering::Relaxed);
//! for thread in threads {
//! thread.join().unwrap();
//! }
//! ```
//!
//! # Consistent snapshots
//!
//! While one probably wants to get a fresh instance every time a work chunk is available,
//! therefore there would be one [`load`] for each work chunk, it is often also important that the
//! configuration doesn't change in the *middle* of processing of one chunk. Therefore, one
//! commonly wants *exactly* one [`load`] for the work chunk, not *at least* one. If the processing
//! had multiple phases, one would use something like this:
//!
//! ```rust
//! # use std::sync::Arc;
//! #
//! # use arc_swap::ArcSwap;
//! # struct Config;
//! # struct Work;
//! # impl Work {
//! # fn fetch() -> Self { Work }
//! # fn phase_1(&self, _: &Config) {}
//! # fn phase_2(&self, _: &Config) {}
//! # }
//! # let config = Arc::new(ArcSwap::from_pointee(Config));
//! let work = Work::fetch();
//! let config = config.load();
//! work.phase_1(&config);
//! // We keep the same config value here
//! work.phase_2(&config);
//! ```
//!
//! Over this:
//!
//! ```rust
//! # use std::sync::Arc;
//! #
//! # use arc_swap::ArcSwap;
//! # struct Config;
//! # struct Work;
//! # impl Work {
//! # fn fetch() -> Self { Work }
//! # fn phase_1(&self, _: &Config) {}
//! # fn phase_2(&self, _: &Config) {}
//! # }
//! # let config = Arc::new(ArcSwap::from_pointee(Config));
//! let work = Work::fetch();
//! work.phase_1(&config.load());
//! // WARNING!! This is broken, because in between phase_1 and phase_2, the other thread could
//! // have replaced the config. Then each phase would be performed with a different one and that
//! // could lead to surprises.
//! work.phase_2(&config.load());
//! ```
//!
//! # Caching of the configuration
//!
//! Let's say that the work chunks are really small, but there's *a lot* of them to work on. Maybe
//! we are routing packets and the configuration is the routing table that can sometimes change,
//! but mostly doesn't.
//!
//! There's an overhead to [`load`]. If the work chunks are small enough, that could be measurable.
//! We can reach for [`Cache`]. It makes loads much faster (in the order of accessing local
//! variables) in case nothing has changed. It has two costs, it makes the load slightly slower in
//! case the thing *did* change (which is rare) and if the worker is inactive, it holds the old
//! cached value alive.
//!
//! This is OK for our use case, because the routing table is usually small enough so some stale
//! instances taking a bit of memory isn't an issue.
//!
//! The part that takes care of updates stays the same as above.
//!
//! ```rust
//! # use std::sync::Arc;
//! # use std::thread;
//! # use std::sync::atomic::{AtomicBool, Ordering};
//! # use arc_swap::{ArcSwap, Cache};
//! # struct Packet; impl Packet { fn receive() -> Self { Packet } }
//!
//! #[derive(Debug, Default)]
//! struct RoutingTable {
//! // ... Stuff in here ...
//! }
//!
//! impl RoutingTable {
//! fn route(&self, _: Packet) {
//! // ... Interesting things are done here ...
//! }
//! }
//!
//! let routing_table = Arc::new(ArcSwap::from_pointee(RoutingTable::default()));
//!
//! let terminate = Arc::new(AtomicBool::new(false));
//! let mut threads = Vec::new();
//!
//! for _ in 0..10 {
//! let t = thread::spawn({
//! let routing_table = Arc::clone(&routing_table);
//! let terminate = Arc::clone(&terminate);
//! move || {
//! let mut routing_table = Cache::new(routing_table);
//! while !terminate.load(Ordering::Relaxed) {
//! let packet = Packet::receive();
//! // This load is cheaper, because we cache in the private Cache thing.
//! // But if the above receive takes a long time, the Cache will keep the stale
//! // value alive until this time (when it will get replaced by up to date value).
//! let current = routing_table.load();
//! current.route(packet);
//! }
//! }
//! });
//! threads.push(t);
//! }
//!
//! // Shut down properly
//! terminate.store(true, Ordering::Relaxed);
//! for thread in threads {
//! thread.join().unwrap();
//! }
//! ```
//!
//! # Projecting into configuration field
//!
//! We have a larger application, composed of multiple components. Each component has its own
//! `ComponentConfig` structure. Then, the whole application has a `Config` structure that contains
//! a component config for each component:
//!
//! ```rust
//! # struct ComponentConfig;
//!
//! struct Config {
//! component: ComponentConfig,
//! // ... Some other components and things ...
//! }
//! # let c = Config { component: ComponentConfig };
//! # let _ = c.component;
//! ```
//!
//! We would like to use [`ArcSwap`] to push updates to the components. But for various reasons,
//! it's not a good idea to put the whole `ArcSwap<Config>` to each component, eg:
//!
//! * That would make each component depend on the top level config, which feels reversed.
//! * It doesn't allow reusing the same component in multiple applications, as these would have
//! different `Config` structures.
//! * One needs to build the whole `Config` for tests.
//! * There's a risk of entanglement, that the component would start looking at configuration of
//! different parts of code, which would be hard to debug.
//!
//! We also could have a separate `ArcSwap<ComponentConfig>` for each component, but that also
//! doesn't feel right, as we would have to push updates to multiple places and they could be
//! inconsistent for a while and we would have to decompose the `Config` structure into the parts,
//! because we need our things in [`Arc`]s to be put into [`ArcSwap`].
//!
//! This is where the [`Access`] trait comes into play. The trait abstracts over things that can
//! give access to up to date version of specific T. That can be a [`Constant`] (which is useful
//! mostly for the tests, where one doesn't care about the updating), it can be an
//! [`ArcSwap<T>`][`ArcSwap`] itself, but it also can be an [`ArcSwap`] paired with a closure to
//! project into the specific field. The [`DynAccess`] is similar, but allows type erasure. That's
//! more convenient, but a little bit slower.
//!
//! ```rust
//! # use std::sync::Arc;
//! # use arc_swap::ArcSwap;
//! # use arc_swap::access::{DynAccess, Map};
//!
//! #[derive(Debug, Default)]
//! struct ComponentConfig;
//!
//! struct Component {
//! config: Box<dyn DynAccess<ComponentConfig>>,
//! }
//!
//! #[derive(Debug, Default)]
//! struct Config {
//! component: ComponentConfig,
//! }
//!
//! let config = Arc::new(ArcSwap::from_pointee(Config::default()));
//!
//! let component = Component {
//! config: Box::new(Map::new(Arc::clone(&config), |config: &Config| &config.component)),
//! };
//! # let _ = component.config;
//! ```
//!
//! One would use `Box::new(Constant(ComponentConfig))` in unittests instead as the `config` field.
//!
//! The [`Cache`] has its own [`Access`][crate::cache::Access] trait for similar purposes.
//!
//! [`Arc`]: std::sync::Arc
//! [`Guard`]: crate::Guard
//! [`load`]: crate::ArcSwapAny::load
//! [`ArcSwap`]: crate::ArcSwap
//! [`Cache`]: crate::cache::Cache
//! [`Access`]: crate::access::Access
//! [`DynAccess`]: crate::access::DynAccess
//! [`Constant`]: crate::access::Constant

View file

@ -1,87 +0,0 @@
//! Performance characteristics.
//!
//! There are several performance advantages of [`ArcSwap`] over [`RwLock`].
//!
//! ## Lock-free readers
//!
//! All the read operations are always [lock-free]. Most of the time, they are actually
//! [wait-free]. They are [lock-free] from time to time, with at least `usize::MAX / 4` accesses
//! that are [wait-free] in between.
//!
//! Writers are [lock-free].
//!
//! Whenever the documentation talks about *contention* in the context of [`ArcSwap`], it talks
//! about contention on the CPU level — multiple cores having to deal with accessing the same cache
//! line. This slows things down (compared to each one accessing its own cache line), but an
//! eventual progress is still guaranteed and the cost is significantly lower than parking threads
//! as with mutex-style contention.
//!
//! ## Speeds
//!
//! The base line speed of read operations is similar to using an *uncontended* [`Mutex`].
//! However, [`load`] suffers no contention from any other read operations and only slight
//! ones during updates. The [`load_full`] operation is additionally contended only on
//! the reference count of the [`Arc`] inside so, in general, while [`Mutex`] rapidly
//! loses its performance when being in active use by multiple threads at once and
//! [`RwLock`] is slow to start with, [`ArcSwap`] mostly keeps its performance even when read by
//! many threads in parallel.
//!
//! Write operations are considered expensive. A write operation is more expensive than access to
//! an *uncontended* [`Mutex`] and on some architectures even slower than uncontended
//! [`RwLock`]. However, it is faster than either under contention.
//!
//! There are some (very unscientific) [benchmarks] within the source code of the library, and the
//! [`DefaultStrategy`][crate::DefaultStrategy] has some numbers measured on my computer.
//!
//! The exact numbers are highly dependant on the machine used (both absolute numbers and relative
//! between different data structures). Not only architectures have a huge impact (eg. x86 vs ARM),
//! but even AMD vs. Intel or two different Intel processors. Therefore, if what matters is more
//! the speed than the wait-free guarantees, you're advised to do your own measurements.
//!
//! Further speed improvements may be gained by the use of the [`Cache`].
//!
//! ## Consistency
//!
//! The combination of [wait-free] guarantees of readers and no contention between concurrent
//! [`load`]s provides *consistent* performance characteristics of the synchronization mechanism.
//! This might be important for soft-realtime applications (the CPU-level contention caused by a
//! recent update/write operation might be problematic for some hard-realtime cases, though).
//!
//! ## Choosing the right reading operation
//!
//! There are several load operations available. While the general go-to one should be
//! [`load`], there may be situations in which the others are a better match.
//!
//! The [`load`] usually only borrows the instance from the shared [`ArcSwap`]. This makes
//! it faster, because different threads don't contend on the reference count. There are two
//! situations when this borrow isn't possible. If the content gets changed, all existing
//! [`Guard`]s are promoted to contain an owned instance. The promotion is done by the
//! writer, but the readers still need to decrement the reference counts of the old instance when
//! they no longer use it, contending on the count.
//!
//! The other situation derives from internal implementation. The number of borrows each thread can
//! have at each time (across all [`Guard`]s) is limited. If this limit is exceeded, an owned
//! instance is created instead.
//!
//! Therefore, if you intend to hold onto the loaded value for extended time span, you may prefer
//! [`load_full`]. It loads the pointer instance ([`Arc`]) without borrowing, which is
//! slower (because of the possible contention on the reference count), but doesn't consume one of
//! the borrow slots, which will make it more likely for following [`load`]s to have a slot
//! available. Similarly, if some API needs an owned `Arc`, [`load_full`] is more convenient and
//! potentially faster than first [`load`]ing and then cloning that [`Arc`].
//!
//! Additionally, it is possible to use a [`Cache`] to get further speed improvement at the
//! cost of less comfortable API and possibly keeping the older values alive for longer than
//! necessary.
//!
//! [`ArcSwap`]: crate::ArcSwap
//! [`Cache`]: crate::cache::Cache
//! [`Guard`]: crate::Guard
//! [`load`]: crate::ArcSwapAny::load
//! [`load_full`]: crate::ArcSwapAny::load_full
//! [`Arc`]: std::sync::Arc
//! [`Mutex`]: std::sync::Mutex
//! [`RwLock`]: std::sync::RwLock
//! [benchmarks]: https://github.com/vorner/arc-swap/tree/master/benchmarks
//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm#Lock-freedom
//! [wait-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm#Wait-freedom

File diff suppressed because it is too large Load diff

View file

@ -1,176 +0,0 @@
use core::mem;
use core::ptr;
use alloc::rc::Rc;
use alloc::sync::Arc;
/// A trait describing smart reference counted pointers.
///
/// Note that in a way [`Option<Arc<T>>`][Option] is also a smart reference counted pointer, just
/// one that can hold NULL.
///
/// The trait is unsafe, because a wrong implementation will break the [ArcSwapAny]
/// implementation and lead to UB.
///
/// This is not actually expected for downstream crates to implement; it is just a means to reuse
/// code for [Arc] and [`Option<Arc>`][Option] variants. However, it is theoretically possible (if
/// you have your own [Arc] implementation).
///
/// It is also implemented for [Rc], but that is not considered very useful (because the
/// [ArcSwapAny] is not `Send` or `Sync`, therefore there's very little advantage for it to be
/// atomic).
///
/// # Safety
///
/// Aside from the obvious properties (like that incrementing and decrementing a reference count
/// cancel each out and that having less references tracked than how many things actually point to
/// the value is fine as long as the count doesn't drop to 0), it also must satisfy that if two
/// pointers have the same value, they point to the same object. This is specifically not true for
/// ZSTs, but it is true for `Arc`s of ZSTs, because they have the reference counts just after the
/// value. It would be fine to point to a type-erased version of the same object, though (if one
/// could use this trait with unsized types in the first place).
///
/// Furthermore, the type should be Pin (eg. if the type is cloned or moved, it should still
/// point/deref to the same place in memory).
///
/// [Arc]: std::sync::Arc
/// [Rc]: std::rc::Rc
/// [ArcSwapAny]: crate::ArcSwapAny
pub unsafe trait RefCnt: Clone {
/// The base type the pointer points to.
type Base;
/// Converts the smart pointer into a raw pointer, without affecting the reference count.
///
/// This can be seen as kind of freezing the pointer it'll be later converted back using
/// [`from_ptr`](#method.from_ptr).
///
/// The pointer must point to the value stored (and the value must be the same as one returned
/// by [`as_ptr`](#method.as_ptr).
fn into_ptr(me: Self) -> *mut Self::Base;
/// Provides a view into the smart pointer as a raw pointer.
///
/// This must not affect the reference count the pointer is only borrowed.
fn as_ptr(me: &Self) -> *mut Self::Base;
/// Converts a raw pointer back into the smart pointer, without affecting the reference count.
///
/// This is only called on values previously returned by [`into_ptr`](#method.into_ptr).
/// However, it is not guaranteed to be 1:1 relation `from_ptr` may be called more times than
/// `into_ptr` temporarily provided the reference count never drops under 1 during that time
/// (the implementation sometimes owes a reference). These extra pointers will either be
/// converted back using `into_ptr` or forgotten.
///
/// # Safety
///
/// This must not be called by code outside of this crate.
unsafe fn from_ptr(ptr: *const Self::Base) -> Self;
/// Increments the reference count by one.
///
/// Return the pointer to the inner thing as a side effect.
fn inc(me: &Self) -> *mut Self::Base {
Self::into_ptr(Self::clone(me))
}
/// Decrements the reference count by one.
///
/// Note this is called on a raw pointer (one previously returned by
/// [`into_ptr`](#method.into_ptr). This may lead to dropping of the reference count to 0 and
/// destruction of the internal pointer.
///
/// # Safety
///
/// This must not be called by code outside of this crate.
unsafe fn dec(ptr: *const Self::Base) {
drop(Self::from_ptr(ptr));
}
}
// SAFETY: Arc::into_raw/from_raw round-trip without touching the strong count, and
// two equal raw pointers imply the same allocation (the count lives next to the value).
unsafe impl<T> RefCnt for Arc<T> {
    type Base = T;
    fn into_ptr(me: Arc<T>) -> *mut T {
        Arc::into_raw(me) as *mut T
    }
    fn as_ptr(me: &Arc<T>) -> *mut T {
        // Slightly convoluted way to do this, but this avoids stacked borrows violations. The same
        // intention as
        //
        // me as &T as *const T as *mut T
        //
        // We first create a "shallow copy" of me - one that doesn't really own its ref count
        // (that's OK, me _does_ own it, so it can't be destroyed in the meantime).
        // Then we can use into_raw (which preserves not having the ref count).
        //
        // We need to "revert" the changes we did. In current std implementation, the combination
        // of from_raw and forget is no-op. But formally, into_raw shall be paired with from_raw
        // and that read shall be paired with forget to properly "close the brackets". In future
        // versions of STD, these may become something else that's not really no-op (unlikely, but
        // possible), so we future-proof it a bit.
        // SAFETY: &T cast to *const T will always be aligned, initialised and valid for reads
        let ptr = Arc::into_raw(unsafe { ptr::read(me) });
        let ptr = ptr as *mut T;
        // SAFETY: We got the pointer from into_raw just above
        mem::forget(unsafe { Arc::from_raw(ptr) });
        ptr
    }
    unsafe fn from_ptr(ptr: *const T) -> Arc<T> {
        Arc::from_raw(ptr)
    }
}
// SAFETY: mirror of the Arc implementation above; Rc::into_raw/from_raw round-trip
// without touching the strong count.
unsafe impl<T> RefCnt for Rc<T> {
    type Base = T;
    fn into_ptr(me: Rc<T>) -> *mut T {
        Rc::into_raw(me) as *mut T
    }
    fn as_ptr(me: &Rc<T>) -> *mut T {
        // Slightly convoluted way to do this, but this avoids stacked borrows violations. The same
        // intention as
        //
        // me as &T as *const T as *mut T
        //
        // We first create a "shallow copy" of me - one that doesn't really own its ref count
        // (that's OK, me _does_ own it, so it can't be destroyed in the meantime).
        // Then we can use into_raw (which preserves not having the ref count).
        //
        // We need to "revert" the changes we did. In current std implementation, the combination
        // of from_raw and forget is no-op. But formally, into_raw shall be paired with from_raw
        // and that read shall be paired with forget to properly "close the brackets". In future
        // versions of STD, these may become something else that's not really no-op (unlikely, but
        // possible), so we future-proof it a bit.
        // SAFETY: &T cast to *const T will always be aligned, initialised and valid for reads
        let ptr = Rc::into_raw(unsafe { ptr::read(me) });
        let ptr = ptr as *mut T;
        // SAFETY: We got the pointer from into_raw just above
        mem::forget(unsafe { Rc::from_raw(ptr) });
        ptr
    }
    unsafe fn from_ptr(ptr: *const T) -> Rc<T> {
        Rc::from_raw(ptr)
    }
}
// SAFETY: delegates to the inner `T: RefCnt`, using the NULL pointer as the
// representation of `None` (no real `T` ever produces NULL here).
unsafe impl<T: RefCnt> RefCnt for Option<T> {
    type Base = T::Base;
    fn into_ptr(me: Option<T>) -> *mut T::Base {
        // `None` is encoded as NULL; `Some` defers to the inner pointer type.
        match me {
            Some(inner) => T::into_ptr(inner),
            None => ptr::null_mut(),
        }
    }
    fn as_ptr(me: &Option<T>) -> *mut T::Base {
        match me {
            Some(inner) => T::as_ptr(inner),
            None => ptr::null_mut(),
        }
    }
    unsafe fn from_ptr(ptr: *const T::Base) -> Option<T> {
        // NULL maps back to `None`; anything else is a live smart pointer.
        match ptr.is_null() {
            true => None,
            false => Some(T::from_ptr(ptr)),
        }
    }
}

View file

@ -1,132 +0,0 @@
use crate::{ArcSwapAny, RefCnt, Strategy};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
impl<T, S> Serialize for ArcSwapAny<T, S>
where
    T: RefCnt + Serialize,
    S: Strategy<T>,
{
    /// Serializes a snapshot of the currently stored value.
    fn serialize<Ser: Serializer>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error> {
        let snapshot = self.load();
        snapshot.serialize(serializer)
    }
}
impl<'de, T, S> Deserialize<'de> for ArcSwapAny<T, S>
where
    T: RefCnt + Deserialize<'de>,
    S: Strategy<T> + Default,
{
    /// Deserializes the inner pointer type and wraps it in a fresh `ArcSwapAny`.
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        T::deserialize(deserializer).map(Self::from)
    }
}
#[cfg(test)]
mod tests {
    use crate::{ArcSwap, ArcSwapAny, ArcSwapOption, RefCnt};
    use serde_derive::{Deserialize, Serialize};
    use serde_test::{assert_tokens, Token};
    use std::sync::Arc;
    // Newtype so we can derive (de)serialization transparently and compare by the
    // *loaded* values (ArcSwapAny itself has no PartialEq).
    #[derive(Debug, Serialize, Deserialize)]
    #[serde(transparent)]
    struct ArcSwapAnyEq<T: RefCnt>(ArcSwapAny<T>);
    impl<T: RefCnt + PartialEq> PartialEq for ArcSwapAnyEq<T> {
        fn eq(&self, other: &Self) -> bool {
            self.0.load().eq(&other.0.load())
        }
    }
    impl<T: RefCnt + PartialEq> Eq for ArcSwapAnyEq<T> {}
    #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
    struct Foo {
        field0: u64,
        field1: String,
    }
    #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
    struct Bar {
        field0: ArcSwapAnyEq<Arc<u64>>,
        field1: ArcSwapAnyEq<Option<Arc<String>>>,
    }
    // Round-trips through serde_test tokens: an ArcSwap wrapper serializes exactly
    // like the value it holds (and Option<Arc<_>> like Option of the value).
    #[test]
    fn test_serialize_deserialize() {
        let field0 = u64::MAX;
        let field1 = "FOO_-0123456789";
        let data_orig = Foo {
            field0,
            field1: field1.to_string(),
        };
        let data = ArcSwapAnyEq(ArcSwap::from_pointee(data_orig));
        assert_tokens(
            &data,
            &[
                Token::Struct {
                    name: "Foo",
                    len: 2,
                },
                Token::Str("field0"),
                Token::U64(u64::MAX),
                Token::Str("field1"),
                Token::String(field1),
                Token::StructEnd,
            ],
        );
        let data = Bar {
            field0: ArcSwapAnyEq(ArcSwap::from_pointee(field0)),
            field1: ArcSwapAnyEq(ArcSwapOption::from_pointee(field1.to_string())),
        };
        assert_tokens(
            &data,
            &[
                Token::Struct {
                    name: "Bar",
                    len: 2,
                },
                Token::Str("field0"),
                Token::U64(u64::MAX),
                Token::Str("field1"),
                Token::Some,
                Token::String(field1),
                Token::StructEnd,
            ],
        );
    }
    #[test]
    fn test_serialize_deserialize_option() {
        let field0 = u64::MAX;
        let field1 = "FOO_-0123456789";
        let data_orig = Foo {
            field0,
            field1: field1.to_string(),
        };
        let data = ArcSwapAnyEq(ArcSwapOption::from_pointee(data_orig));
        assert_tokens(
            &data,
            &[
                Token::Some,
                Token::Struct {
                    name: "Foo",
                    len: 2,
                },
                Token::Str("field0"),
                Token::U64(u64::MAX),
                Token::Str("field1"),
                Token::String(field1),
                Token::StructEnd,
            ],
        );
    }
    // An empty ArcSwapOption serializes as a bare None token.
    #[test]
    fn test_serialize_deserialize_option_none() {
        let data = ArcSwapAnyEq(ArcSwapOption::<Foo>::from_pointee(None));
        assert_tokens(&data, &[Token::None]);
    }
}

View file

@ -1,235 +0,0 @@
//! A hybrid strategy.
//!
//! This is based on debts — an Arc may owe a reference, but it is marked in the debt. It is either
//! put back (by stopping using it), or if the pointer is replaced, the writer bumps the reference
//! count and removes the debt.
//!
//! The strategy uses two different slots for the debts. The first ones are faster, but fallible.
//! If they fail (either because there's interference from a writer at the same time, or because
//! they are full), the secondary one that is slower, but always succeeds, is used. In the latter
//! case, the reference is bumped and this secondary debt slot is released, so it is available for
//! further loads.
//!
//! See the [crate::debt] module for the actual slot manipulation. Here we just wrap them into the
//! strategy.
use core::borrow::Borrow;
use core::mem::{self, ManuallyDrop};
use core::ops::Deref;
use core::ptr;
use core::sync::atomic::AtomicPtr;
use core::sync::atomic::Ordering::*;
use super::sealed::{CaS, InnerStrategy, Protected};
use crate::debt::{Debt, LocalNode};
use crate::ref_cnt::RefCnt;
/// A loaded value, protected either by an owned reference or by a debt slot.
pub struct HybridProtection<T: RefCnt> {
    // The debt slot guarding `ptr`, if any. `None` means we own a real reference.
    debt: Option<&'static Debt>,
    // The pointer itself; ManuallyDrop because Drop decides whether to release it.
    ptr: ManuallyDrop<T>,
}
impl<T: RefCnt> HybridProtection<T> {
    /// Wraps a raw pointer together with an optional debt guarding it.
    ///
    /// # Safety
    ///
    /// `ptr` must come from `T::into_ptr`/`T::as_ptr` and, if `debt` is `None`, a
    /// reference must be owed to us (we will eventually release it in Drop).
    pub(super) unsafe fn new(ptr: *const T::Base, debt: Option<&'static Debt>) -> Self {
        Self {
            debt,
            ptr: ManuallyDrop::new(T::from_ptr(ptr)),
        }
    }
    /// Try getting a debt into a fast slot.
    #[inline]
    fn attempt(node: &LocalNode, storage: &AtomicPtr<T::Base>) -> Option<Self> {
        // Relaxed is good enough here, see the Acquire below
        let ptr = storage.load(Relaxed);
        // Try to get a debt slot. If not possible, fail.
        let debt = node.new_fast(ptr as usize)?;
        // Acquire to get the data.
        //
        // SeqCst to make sure the storage vs. the debt are well ordered.
        let confirm = storage.load(SeqCst);
        if ptr == confirm {
            // Successfully got a debt
            Some(unsafe { Self::new(ptr, Some(debt)) })
        } else if debt.pay::<T>(ptr) {
            // It changed in the meantime, we return the debt (that is on the outdated pointer,
            // possibly destroyed) and fail.
            None
        } else {
            // It changed in the meantime, but the debt for the previous pointer was already paid
            // for by someone else, so we are fine using it.
            Some(unsafe { Self::new(ptr, None) })
        }
    }
    /// Get a debt slot using the slower but always successful mechanism.
    fn fallback(node: &LocalNode, storage: &AtomicPtr<T::Base>) -> Self {
        // First, we claim a debt slot and store the address of the atomic pointer there, so the
        // writer can optionally help us out with loading and protecting something.
        let gen = node.new_helping(storage as *const _ as usize);
        // We already synchronized the start of the sequence by SeqCst in the new_helping vs swap on
        // the pointer. We just need to make sure to bring the pointee in (this can be newer than
        // what we got in the Debt)
        let candidate = storage.load(Acquire);
        // Try to replace the debt with our candidate. If it works, we get the debt slot to use. If
        // not, we get a replacement value, already protected and a debt to take care of.
        match node.confirm_helping(gen, candidate as usize) {
            Ok(debt) => {
                // The fast path -> we got the debt confirmed alright.
                //
                // NOTE(review): `into_inner` pays the debt off immediately and the result is
                // re-wrapped with no debt — i.e. this path ends up holding a full reference
                // rather than keeping the confirmed debt. Confirm this is intended.
                Self::from_inner(unsafe { Self::new(candidate, Some(debt)).into_inner() })
            }
            Err((unused_debt, replacement)) => {
                // The debt is on the candidate we provided and it is unused, so we just pay it
                // back right away.
                if !unused_debt.pay::<T>(candidate) {
                    unsafe { T::dec(candidate) };
                }
                // We got a (possibly) different pointer out. But that one is already protected and
                // the slot is paid back.
                unsafe { Self::new(replacement as *mut _, None) }
            }
        }
    }
    /// Raw pointer view of the protected value (does not touch the ref count).
    #[inline]
    fn as_ptr(&self) -> *const T::Base {
        T::as_ptr(self.ptr.deref())
    }
}
impl<T: RefCnt> Drop for HybridProtection<T> {
    /// Releases the protection: either returns the debt slot, or drops the owned reference.
    #[inline]
    fn drop(&mut self) {
        match self.debt.take() {
            // We have our own copy of Arc, so we don't need a protection. Do nothing (but release
            // the Arc below).
            None => (),
            // If we owed something, just return the debt. We don't have a pointer owned, so
            // nothing to release.
            Some(debt) => {
                let ptr = T::as_ptr(&self.ptr);
                if debt.pay::<T>(ptr) {
                    return;
                }
                // But if the debt was already paid for us, we need to release the pointer, as we
                // were effectively already in the Unprotected mode.
            }
        }
        // Equivalent to T::dec(ptr)
        unsafe { ManuallyDrop::drop(&mut self.ptr) };
    }
}
impl<T: RefCnt> Protected<T> for HybridProtection<T> {
    /// Wraps an already-owned value; no debt is needed.
    #[inline]
    fn from_inner(ptr: T) -> Self {
        Self {
            debt: None,
            ptr: ManuallyDrop::new(ptr),
        }
    }
    /// Converts into a plain owned value, paying off any debt first.
    #[inline]
    fn into_inner(mut self) -> T {
        // Drop any debt and release any lock held by the given guard and return a
        // full-featured value that even can outlive the ArcSwap it originated from.
        match self.debt.take() {
            None => (), // We have a fully loaded ref-counted pointer.
            Some(debt) => {
                // Bump the count so we own a real reference, then return the slot.
                let ptr = T::inc(&self.ptr);
                if !debt.pay::<T>(ptr) {
                    // Someone paid the debt for us meanwhile; undo the extra bump.
                    unsafe { T::dec(ptr) };
                }
            }
        }
        // The ptr::read & forget is something like a cheating move. We can't move it out, because
        // we have a destructor and Rust doesn't allow us to do that.
        let inner = unsafe { ptr::read(self.ptr.deref()) };
        mem::forget(self);
        inner
    }
}
impl<T: RefCnt> Borrow<T> for HybridProtection<T> {
#[inline]
fn borrow(&self) -> &T {
&self.ptr
}
}
/// Compile-time knobs for [`HybridStrategy`].
pub trait Config {
    // Mostly for testing, way to disable the fast slots
    const USE_FAST: bool;
}
/// The production configuration: fast slots enabled.
#[derive(Clone, Default)]
pub struct DefaultConfig;
impl Config for DefaultConfig {
    const USE_FAST: bool = true;
}
/// The hybrid strategy itself; behavior is tuned by the `Cfg` type parameter.
#[derive(Clone, Default)]
pub struct HybridStrategy<Cfg> {
    // Zero-sized in practice; only carries the Config type.
    pub(crate) _config: Cfg,
}
impl<T, Cfg> InnerStrategy<T> for HybridStrategy<Cfg>
where
    T: RefCnt,
    Cfg: Config,
{
    type Protected = HybridProtection<T>;
    /// Loads the pointer, trying the fast debt slots first (if enabled) and
    /// falling back to the always-successful helping mechanism.
    unsafe fn load(&self, storage: &AtomicPtr<T::Base>) -> Self::Protected {
        LocalNode::with(|node| {
            let fast = if Cfg::USE_FAST {
                HybridProtection::attempt(node, storage)
            } else {
                None
            };
            fast.unwrap_or_else(|| HybridProtection::fallback(node, storage))
        })
    }
    /// Settles all outstanding debts on `old` so the writer may release it.
    unsafe fn wait_for_readers(&self, old: *const T::Base, storage: &AtomicPtr<T::Base>) {
        // The pay_all may need to provide fresh replacement values if someone else is loading from
        // this particular storage. We do so by the exact same way, by `load` — it's OK, a writer
        // does not hold a slot and the reader doesn't recurse back into writer, so we won't run
        // out of slots.
        let replacement = || self.load(storage).into_inner();
        Debt::pay_all::<T, _>(old, storage as *const _ as usize, replacement);
    }
}
impl<T: RefCnt, Cfg: Config> CaS<T> for HybridStrategy<Cfg> {
    /// Compare-and-swap loop: keeps retrying while the stored pointer still
    /// equals `current` but the weak exchange spuriously fails.
    unsafe fn compare_and_swap<C: crate::as_raw::AsRaw<T::Base>>(
        &self,
        storage: &AtomicPtr<T::Base>,
        current: C,
        new: T,
    ) -> Self::Protected {
        loop {
            let old = <Self as InnerStrategy<T>>::load(self, storage);
            // Observation of their inequality is enough to make a verdict
            if old.as_ptr() != current.as_raw() {
                return old;
            }
            // If they are still equal, put the new one in.
            let new_raw = T::as_ptr(&new);
            if storage
                .compare_exchange_weak(current.as_raw(), new_raw, SeqCst, Relaxed)
                .is_ok()
            {
                // We successfully put the new value in. The ref count went in there too.
                T::into_ptr(new);
                <Self as InnerStrategy<T>>::wait_for_readers(self, old.as_ptr(), storage);
                // We just got one ref count out of the storage and we have one in old. We don't
                // need two.
                T::dec(old.as_ptr());
                return old;
            }
        }
    }
}

View file

@ -1,168 +0,0 @@
//! Strategies for protecting the reference counts.
//!
//! There are multiple algorithms how to protect the reference counts while they're being updated
//! by multiple threads, each with its own set of pros and cons. The [`DefaultStrategy`] is used by
//! default and should generally be the least surprising option. It is possible to pick a different
//! strategy.
//!
//! For now, the traits in here are sealed and don't expose any methods to the users of the crate.
//! This is because we are not confident about the details just yet. In the future it may be
//! possible for downstream users to implement their own, but for now it is only so users can
//! choose one of the provided.
//!
//! It is expected that future strategies would come with different capabilities and limitations.
//! In particular, some may not be "tight" in the cleanup (they may delay the cleanup) or may not
//! support the compare and swap operations.
//!
//! Currently, we have these strategies:
//!
//! * [`DefaultStrategy`] (this one is used implicitly)
//! * [`RwLock<()>`][std::sync::RwLock]
//!
//! # Testing
//!
//! Formally, the [`RwLock<()>`][std::sync::RwLock] may be used as a strategy too. It doesn't have
//! the performance characteristics or lock-free guarantees of the others, but it is much simpler
//! and contains less `unsafe` code (actually, less code altogether). Therefore, it can be used for
//! testing purposes and cross-checking.
//!
//! Note that generally, using [`RwLock<Arc<T>>`][std::sync::RwLock] is likely to be better
//! performance wise. So if the goal is to not use third-party unsafe code, only the one in
//! [`std`], that is the better option. This is provided mostly for investigation and testing of
//! [`ArcSwap`] itself or algorithms written to use [`ArcSwap`].
//!
//! *This is not meant to be used in production code*.
//!
//! [`ArcSwap`]: crate::ArcSwap
//! [`load`]: crate::ArcSwapAny::load
use core::borrow::Borrow;
use core::sync::atomic::AtomicPtr;
use crate::ref_cnt::RefCnt;
// The default, debt-based strategy.
pub(crate) mod hybrid;
// These two features cannot be combined; see the message below.
#[cfg(all(
    feature = "internal-test-strategies",
    feature = "experimental-thread-local"
))]
compile_error!("experimental-thread-local is incompatible with internal-test-strategies as it enables #[no_std]");
// RwLock-based reference strategy, for testing/cross-checking only.
#[cfg(feature = "internal-test-strategies")]
mod rw_lock;
// Do not use from outside of the crate.
#[cfg(feature = "internal-test-strategies")]
#[doc(hidden)]
pub mod test_strategies;
use self::hybrid::{DefaultConfig, HybridStrategy};
/// The default strategy.
///
/// It is used by the type aliases [`ArcSwap`][crate::ArcSwap] and
/// [`ArcSwapOption`][crate::ArcSwapOption]. Only the other strategies need to be used explicitly.
///
/// # Performance characteristics
///
/// * It is optimized for read-heavy situations, with possibly many concurrent read accesses from
///   multiple threads. Readers don't contend each other at all.
/// * Readers are wait-free (with the exception of at most once in `usize::MAX / 4` accesses, which
///   is only lock-free).
/// * Writers are lock-free.
/// * Reclamation is exact — the resource is released as soon as possible (works like RAII, not
///   like a traditional garbage collector; can contain non-`'static` data).
///
/// Each thread has a limited number of fast slots (currently 8, but the exact number is not
/// guaranteed). If it holds at most that many [`Guard`]s at once, acquiring them is fast. Once
/// these slots are used up (by holding to these many [`Guard`]s), acquiring more of them will be
/// slightly slower, but still wait-free.
///
/// If you expect to hold a lot of "handles" to the data around, or hold onto it for a long time,
/// you may want to prefer the [`load_full`][crate::ArcSwapAny::load_full] method.
///
/// The speed of the fast slots is in the ballpark of locking an *uncontended* mutex. The advantage
/// over the mutex is the stability of speed in the face of contention from other threads — while
/// the performance of mutex goes rapidly down, the slowdown of running out of held slots or heavy
/// concurrent writer thread is in the area of single-digit multiples.
///
/// The ballpark benchmark figures (my older computer) are around these, but you're welcome to run
/// the benchmarks in the git repository or write your own.
///
/// * Load (both uncontended and contended by other loads): ~30ns
/// * `load_full`: ~50ns uncontended, goes up a bit with other `load_full` in other threads on the
///   same `Arc` value (~80-100ns).
/// * Loads after running out of the slots are about 10-20ns slower than `load_full`.
/// * Stores: Dependent on number of threads, but generally low microseconds.
/// * Loads with heavy concurrent writer (to the same `ArcSwap`): ~250ns.
///
/// [`load`]: crate::ArcSwapAny::load
/// [`Guard`]: crate::Guard
pub type DefaultStrategy = HybridStrategy<DefaultConfig>;
/// Strategy for isolating instances.
///
/// It is similar to [`DefaultStrategy`], however the spin lock is not sharded (therefore multiple
/// concurrent threads might get bigger hit when multiple threads have to fall back). Nevertheless,
/// each instance has a private spin lock, not influencing the other instances. That also makes
/// them bigger in memory.
///
/// The hazard pointers are still shared between all instances.
///
/// The purpose of this strategy is meant for cases where a single instance is going to be
/// "tortured" a lot, so it should not overflow to other instances.
///
/// This too may be changed for something else (but with at least as good guarantees, primarily
/// that other instances won't get influenced by the "torture").
// Testing if the DefaultStrategy is good enough to replace it fully and then deprecate.
// NOTE: currently just an alias for DefaultStrategy — the description above refers to a
// previous implementation.
#[doc(hidden)]
pub type IndependentStrategy = DefaultStrategy;
// TODO: When we are ready to un-seal, should these traits become unsafe?
pub(crate) mod sealed {
    use super::*;
    use crate::as_raw::AsRaw;
    /// A loaded value, kept protected while this guard lives.
    pub trait Protected<T>: Borrow<T> {
        /// Converts to a fully owned value (may bump the ref count).
        fn into_inner(self) -> T;
        /// Wraps an already-owned value.
        fn from_inner(ptr: T) -> Self;
    }
    /// The actual strategy implementation, hidden from downstream users.
    pub trait InnerStrategy<T: RefCnt> {
        // Drop „unlocks“
        type Protected: Protected<T>;
        /// Loads the value from the atomic storage, protecting it.
        unsafe fn load(&self, storage: &AtomicPtr<T::Base>) -> Self::Protected;
        /// Blocks/settles until no reader still relies on `old`.
        unsafe fn wait_for_readers(&self, old: *const T::Base, storage: &AtomicPtr<T::Base>);
    }
    /// Optional compare-and-swap capability on top of a strategy.
    pub trait CaS<T: RefCnt>: InnerStrategy<T> {
        unsafe fn compare_and_swap<C: AsRaw<T::Base>>(
            &self,
            storage: &AtomicPtr<T::Base>,
            current: C,
            new: T,
        ) -> Self::Protected;
    }
}
/// A strategy for protecting the reference counted pointer `T`.
///
/// This chooses the algorithm for how the reference counts are protected. Note that the user of
/// the crate can't implement the trait and can't access any method; this is hopefully temporary
/// measure to make sure the interface is not part of the stability guarantees of the crate. Once
/// enough experience is gained with implementing various strategies, it will be un-sealed and
/// users will be able to provide their own implementation.
///
/// For now, the trait works only as a bound to talk about the types that represent strategies.
pub trait Strategy<T: RefCnt>: sealed::InnerStrategy<T> {}
// Blanket impl: anything implementing the sealed trait is a Strategy.
impl<T: RefCnt, S: sealed::InnerStrategy<T>> Strategy<T> for S {}
/// An extension of the [`Strategy`], allowing for compare and swap operation.
///
/// The compare and swap operation is "advanced" and not all strategies need to support them.
/// Therefore, it is a separate trait.
///
/// Similarly, it is not yet made publicly usable or implementable and works only as a bound.
pub trait CaS<T: RefCnt>: sealed::CaS<T> {}
// Blanket impl mirroring the one for Strategy.
impl<T: RefCnt, S: sealed::CaS<T>> CaS<T> for S {}

View file

@ -1,63 +0,0 @@
use core::sync::atomic::{AtomicPtr, Ordering};
use std::sync::RwLock;
use super::sealed::{CaS, InnerStrategy, Protected};
use crate::as_raw::AsRaw;
use crate::ref_cnt::RefCnt;
// The trivial protection: a `T` already owns its reference, so both
// conversions are the identity.
impl<T: RefCnt> Protected<T> for T {
    #[inline]
    fn into_inner(self) -> T {
        self
    }
    #[inline]
    fn from_inner(value: T) -> Self {
        value
    }
}
impl<T: RefCnt> InnerStrategy<T> for RwLock<()> {
    // The protection here is a plain owned reference (the ref count was bumped
    // under the read lock), so Protected = T.
    type Protected = T;
    unsafe fn load(&self, storage: &AtomicPtr<T::Base>) -> T {
        // Hold the read lock while materializing and bumping the reference, so
        // wait_for_readers (which takes the write lock) cannot race with us.
        let _guard = self.read().expect("We don't panic in here");
        let ptr = storage.load(Ordering::Acquire);
        let ptr = T::from_ptr(ptr as *const T::Base);
        T::inc(&ptr);
        ptr
    }
    unsafe fn wait_for_readers(&self, _: *const T::Base, _: &AtomicPtr<T::Base>) {
        // By acquiring the write lock, we make sure there are no read locks present across it.
        drop(self.write().expect("We don't panic in here"));
    }
}
impl<T: RefCnt> CaS<T> for RwLock<()> {
    /// Compare-and-swap under the write lock; returns the previous value.
    unsafe fn compare_and_swap<C: AsRaw<T::Base>>(
        &self,
        storage: &AtomicPtr<T::Base>,
        current: C,
        new: T,
    ) -> Self::Protected {
        // NOTE(review): the Result is kept (not unwrapped) — the guard inside still
        // holds the lock even when poisoned, but unlike `load` this silently ignores
        // poisoning; confirm that asymmetry is intended.
        let _lock = self.write();
        let cur = current.as_raw();
        let new = T::into_ptr(new);
        let swapped = storage.compare_exchange(cur, new, Ordering::AcqRel, Ordering::Relaxed);
        let old = match swapped {
            Ok(old) => old,
            Err(old) => old,
        };
        let old = T::from_ptr(old as *const T::Base);
        if swapped.is_err() {
            // If the new didn't go in, we need to destroy it and increment count in the old that
            // we just duplicated
            T::inc(&old);
            drop(T::from_ptr(new));
        }
        drop(current);
        old
    }
}

View file

@ -1,22 +0,0 @@
#![deprecated(note = "Only for internal testing. Do not use")]
#![allow(deprecated)] // We need to allow ourselves the stuff we deprecate here.
//! Some strategies for internal testing.
//!
//! # Warning
//!
//! They come with no guarantees of correctness, stability, performance or anything at all. *DO NOT
//! USE*.
use super::hybrid::{Config, HybridStrategy};
/// Config for no fast slots.
#[derive(Clone, Copy, Default)]
pub struct NoFastSlots;
impl Config for NoFastSlots {
    // Forces every load through the slower fallback path.
    const USE_FAST: bool = false;
}
/// A strategy that fills the slots with some crap to make sure we test the fallbacks too.
#[deprecated(note = "Only for internal testing. Do not use")]
pub type FillFastSlots = HybridStrategy<NoFastSlots>;

View file

@ -1,118 +0,0 @@
use core::ptr;
use alloc::rc::Weak as RcWeak;
use alloc::sync::Weak;
use crate::RefCnt;
// SAFETY: a `Weak::new()` (one that never pointed anywhere) is mapped to/from the
// NULL raw pointer, so Weak round-trips through raw pointers like the other impls.
unsafe impl<T> RefCnt for Weak<T> {
    type Base = T;
    fn as_ptr(me: &Self) -> *mut T {
        // ptr_eq against a fresh Weak::new() detects the "dangling" case.
        if Weak::ptr_eq(&Weak::new(), me) {
            ptr::null_mut()
        } else {
            Weak::as_ptr(me) as *mut T
        }
    }
    fn into_ptr(me: Self) -> *mut T {
        if Weak::ptr_eq(&Weak::new(), &me) {
            ptr::null_mut()
        } else {
            Weak::into_raw(me) as *mut T
        }
    }
    unsafe fn from_ptr(ptr: *const T) -> Self {
        // NULL maps back to the dangling Weak.
        if ptr.is_null() {
            Weak::new()
        } else {
            Weak::from_raw(ptr)
        }
    }
}
// SAFETY: mirror of the Weak<T> impl above, for the Rc flavour.
unsafe impl<T> RefCnt for RcWeak<T> {
    type Base = T;
    fn as_ptr(me: &Self) -> *mut T {
        if RcWeak::ptr_eq(&RcWeak::new(), me) {
            ptr::null_mut()
        } else {
            RcWeak::as_ptr(me) as *mut T
        }
    }
    fn into_ptr(me: Self) -> *mut T {
        if RcWeak::ptr_eq(&RcWeak::new(), &me) {
            ptr::null_mut()
        } else {
            RcWeak::into_raw(me) as *mut T
        }
    }
    unsafe fn from_ptr(ptr: *const T) -> Self {
        if ptr.is_null() {
            RcWeak::new()
        } else {
            RcWeak::from_raw(ptr)
        }
    }
}
// Generates one test module per strategy, exercising ArcSwapAny<Weak<T>>.
macro_rules! t {
    ($name: ident, $strategy: ty) => {
        #[cfg(test)]
        mod $name {
            use alloc::sync::{Arc, Weak};
            use crate::ArcSwapAny;
            #[allow(deprecated)] // We use "deprecated" testing strategies in here.
            type ArcSwapWeak<T> = ArcSwapAny<Weak<T>, $strategy>;
            // Convert to weak, push it through the shared and pull it out again.
            #[test]
            fn there_and_back() {
                let data = Arc::new("Hello");
                let shared = ArcSwapWeak::new(Arc::downgrade(&data));
                assert_eq!(1, Arc::strong_count(&data));
                assert_eq!(1, Arc::weak_count(&data));
                let weak = shared.load();
                assert_eq!("Hello", *weak.upgrade().unwrap());
                assert!(Arc::ptr_eq(&data, &weak.upgrade().unwrap()));
            }
            // Replace a weak pointer with a NULL one
            #[test]
            fn reset() {
                let data = Arc::new("Hello");
                let shared = ArcSwapWeak::new(Arc::downgrade(&data));
                assert_eq!(1, Arc::strong_count(&data));
                assert_eq!(1, Arc::weak_count(&data));
                // An empty weak (eg. NULL)
                shared.store(Weak::new());
                assert_eq!(1, Arc::strong_count(&data));
                assert_eq!(0, Arc::weak_count(&data));
                let weak = shared.load();
                assert!(weak.upgrade().is_none());
            }
            // Destroy the underlying data while the weak is still stored inside. Should make it go
            // NULL-ish
            #[test]
            fn destroy() {
                let data = Arc::new("Hello");
                let shared = ArcSwapWeak::new(Arc::downgrade(&data));
                drop(data);
                let weak = shared.load();
                assert!(weak.upgrade().is_none());
            }
        }
    };
}
// Instantiate the weak-pointer tests for each strategy.
t!(tests_default, crate::DefaultStrategy);
#[cfg(feature = "internal-test-strategies")]
t!(
    tests_full_slots,
    crate::strategy::test_strategies::FillFastSlots
);

View file

@ -1,125 +0,0 @@
//! Let it torture the implementation with some randomized operations.
use std::mem;
use std::sync::Arc;
use arc_swap::{ArcSwapAny, DefaultStrategy, IndependentStrategy};
use once_cell::sync::Lazy;
use proptest::prelude::*;
/// One randomized operation to perform against the shared pointer.
#[derive(Copy, Clone, Debug)]
enum OpsInstruction {
    Store(usize),
    Swap(usize),
    LoadFull,
    Load,
}
impl OpsInstruction {
    /// Proptest strategy producing a uniformly chosen random instruction.
    fn random() -> impl Strategy<Value = Self> {
        prop_oneof![
            any::<usize>().prop_map(Self::Store),
            any::<usize>().prop_map(Self::Swap),
            Just(Self::LoadFull),
            Just(Self::Load),
        ]
    }
}
// NOTE(review): an empty `proptest! {}` invocation stood here; it expands to
// nothing and has been removed as dead code.
/// Number of distinct pre-allocated Arc values to choose from.
const LIMIT: usize = 5;
/// Length bound for the generated instruction sequences (smaller under miri).
#[cfg(not(miri))]
const SIZE: usize = 100;
#[cfg(miri)]
const SIZE: usize = 10;
// Shared pool of Arcs so tests can compare identities via Arc::ptr_eq.
static ARCS: Lazy<Vec<Arc<usize>>> = Lazy::new(|| (0..LIMIT).map(Arc::new).collect());
/// A randomized swap/compare-and-swap instruction, indexing into the ARCS pool.
#[derive(Copy, Clone, Debug)]
enum SelInstruction {
    Swap(usize),
    Cas(usize, usize),
}
impl SelInstruction {
    /// Proptest strategy producing a random Swap or Cas with in-range indices.
    fn random() -> impl Strategy<Value = Self> {
        prop_oneof![
            (0..LIMIT).prop_map(Self::Swap),
            (0..LIMIT, 0..LIMIT).prop_map(|(cur, new)| Self::Cas(cur, new)),
        ]
    }
}
// Generate the same tests for bunch of strategies (one module for one strategy)
// @full adds the CaS-dependent `selection` test; @nocas only generates `ops`.
macro_rules! t {
    (@full => $name: ident, $strategy: ty) => {
        t!(@compose => $name, $strategy,
            #[test]
            fn selection(
                instructions in proptest::collection::vec(SelInstruction::random(), 1..SIZE),
            ) {
                // `bare` is the reference model; the ArcSwap must mirror it step by step.
                let mut bare = Arc::clone(&ARCS[0]);
                #[allow(deprecated)] // We use "deprecated" testing strategies in here.
                let a = ArcSwapAny::<_, $strategy>::from(Arc::clone(&ARCS[0]));
                for ins in instructions {
                    match ins {
                        SelInstruction::Swap(idx) => {
                            let expected = mem::replace(&mut bare, Arc::clone(&ARCS[idx]));
                            let actual = a.swap(Arc::clone(&ARCS[idx]));
                            assert!(Arc::ptr_eq(&expected, &actual));
                        }
                        SelInstruction::Cas(cur, new) => {
                            let expected = Arc::clone(&bare);
                            if bare == ARCS[cur] {
                                bare = Arc::clone(&ARCS[new]);
                            }
                            let actual = a.compare_and_swap(&ARCS[cur], Arc::clone(&ARCS[new]));
                            assert!(Arc::ptr_eq(&expected, &actual));
                        }
                    }
                }
            }
        );
    };
    (@nocas => $name: ident, $strategy: ty) => {
        t!(@compose => $name, $strategy, );
    };
    (@compose => $name: ident, $strategy: ty, $($extra: tt)*) => {
        mod $name {
            use super::*;
            proptest! {
                $($extra)*
                #[test]
                fn ops(
                    instructions in proptest::collection::vec(OpsInstruction::random(), 1..SIZE),
                ) {
                    use crate::OpsInstruction::*;
                    // `m` is the plain-value reference model for the stored usize.
                    let mut m = 0;
                    #[allow(deprecated)] // We use "deprecated" testing strategies in here.
                    let a = ArcSwapAny::<_, $strategy>::new(Arc::new(0usize));
                    for ins in instructions {
                        match ins {
                            Store(v) => {
                                m = v;
                                a.store(Arc::new(v));
                            }
                            Swap(v) => {
                                let old = mem::replace(&mut m, v);
                                assert_eq!(old, *a.swap(Arc::new(v)));
                            }
                            Load => assert_eq!(m, **a.load()),
                            LoadFull => assert_eq!(m, *a.load_full()),
                        }
                    }
                }
            }
        }
    };
}
// Instantiate the randomized-operations tests for each strategy.
t!(@full => default, DefaultStrategy);
t!(@full => independent, IndependentStrategy);
#[cfg(feature = "internal-test-strategies")]
t!(@full => full_slots, arc_swap::strategy::test_strategies::FillFastSlots);

View file

@ -1,310 +0,0 @@
//! Stress-tests
//!
//! The tests in here try to torture the implementation with multiple threads, in an attempt to
//! discover any possible race condition.
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, MutexGuard, PoisonError};
use adaptive_barrier::{Barrier, PanicMode};
use arc_swap::strategy::{CaS, DefaultStrategy, IndependentStrategy, Strategy};
use arc_swap::ArcSwapAny;
use crossbeam_utils::thread;
use itertools::Itertools;
use once_cell::sync::Lazy;
static LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
/// We want to prevent these tests from running concurrently, because they run multi-threaded.
fn lock() -> MutexGuard<'static, ()> {
    // A poisoned lock is fine here — a previous test panicking must not
    // cascade into failing the rest, so we just take the guard out.
    match LOCK.lock() {
        Ok(guard) => guard,
        Err(poisoned) => poisoned.into_inner(),
    }
}
/// One node of the concurrently-built linked list used by the stress tests.
struct LLNode<S: Strategy<Option<Arc<LLNode<S>>>>> {
    // Next node (None terminates the list), swapped atomically via the strategy under test.
    next: ArcSwapAny<Option<Arc<LLNode<S>>>, S>,
    // Sequence number within the owning thread's chain.
    num: usize,
    // Index of the thread that created this node.
    owner: usize,
}
/// A test that repeatedly builds a linked list concurrently with multiple threads.
///
/// The idea here is to stress-test the RCU implementation and see that no items get lost and that
/// the ref counts are correct afterwards.
///
/// `node_cnt` is the number of nodes each thread contributes per iteration and
/// `iters` is the number of build/check/reset rounds.
fn storm_link_list<S>(node_cnt: usize, iters: usize)
where
    S: Default + CaS<Option<Arc<LLNode<S>>>> + Send + Sync,
{
    // Serialize with the other multi-threaded tests in this file.
    let _lock = lock();
    let head = ArcSwapAny::<_, S>::from(None::<Arc<LLNode<S>>>);
    #[cfg(not(miri))]
    let cpus = num_cpus::get();
    #[cfg(miri)]
    let cpus = 2;
    // Poison mode: if one thread panics, the others are released instead of deadlocking.
    let barr = Barrier::new(PanicMode::Poison);
    thread::scope(|scope| {
        for thread in 0..cpus {
            // We want to borrow these, but that kind-of conflicts with the move closure mode
            let mut barr = barr.clone();
            let head = &head;
            scope.spawn(move |_| {
                // Pre-allocate this thread's nodes; they are reused across iterations.
                let nodes = (0..node_cnt)
                    .map(|i| LLNode {
                        next: ArcSwapAny::from(None),
                        num: i,
                        owner: thread,
                    })
                    .map(Arc::new)
                    .collect::<Vec<_>>();
                for iter in 0..iters {
                    barr.wait(); // Start synchronously
                    // Prepend our nodes in reverse so they end up in ascending order.
                    for n in nodes.iter().rev() {
                        head.rcu(|head| {
                            n.next.store(head.clone()); // Cloning the optional Arc
                            Some(Arc::clone(n))
                        });
                    }
                    // And do the checks once everyone finishes
                    barr.wait();
                    // First, check that all our numbers are increasing by one and all are present
                    let mut node = head.load();
                    let mut expecting = 0;
                    while node.is_some() {
                        // A bit of gymnastics, we don't have NLL yet and we need to persuade the
                        // borrow checker this is safe.
                        let next = {
                            let inner = node.as_ref().unwrap();
                            if inner.owner == thread {
                                assert_eq!(expecting, inner.num);
                                expecting += 1;
                            }
                            inner.next.load()
                        };
                        node = next;
                    }
                    assert_eq!(node_cnt, expecting);
                    // We don't want to count the ref-counts while someone still plays around with
                    // them and loading.
                    barr.wait();
                    // Now that we've checked we have everything, check that all the nodes have ref
                    // count 2 once in the vector, once in the linked list.
                    for n in &nodes {
                        assert_eq!(
                            2,
                            Arc::strong_count(n),
                            "Wrong number of counts in item {} in iteration {}",
                            n.num,
                            iter,
                        );
                    }
                    // Reset the head so we don't mix the runs together, which would create a mess.
                    // Also, the tails might disturb the ref counts.
                    barr.wait();
                    head.store(None);
                    nodes.last().unwrap().next.store(None);
                }
                barr.wait();
                // We went through all the iterations. Dismantle the list and see that everything
                // has ref count 1.
                head.store(None);
                for n in &nodes {
                    n.next.store(None);
                }
                barr.wait(); // Wait until everyone resets their own nexts
                for n in &nodes {
                    assert_eq!(1, Arc::strong_count(n));
                }
            });
        }
        // Drop the original handle so only the worker clones keep the barrier alive.
        drop(barr);
    })
    .unwrap();
}
/// A linked-list node that decrements a shared live counter when dropped,
/// so `storm_unroll` can verify every node gets destroyed exactly once.
struct LLNodeCnt<'a> {
    // Plain (non-atomic) link; the node is only mutated before publication.
    next: Option<Arc<LLNodeCnt<'a>>>,
    // Sequence number within the owning thread's batch.
    num: usize,
    // Index of the creating thread.
    owner: usize,
    // Shared count of nodes still alive.
    live_cnt: &'a AtomicUsize,
}
impl<'a> Drop for LLNodeCnt<'a> {
    fn drop(&mut self) {
        // Record the destruction; Relaxed is enough, the barriers order the reads.
        self.live_cnt.fetch_sub(1, Ordering::Relaxed);
    }
}
/// Test where we build and then deconstruct a linked list using multiple threads.
///
/// Each of `iters` rounds has every thread push `node_cnt` nodes and then all
/// threads cooperatively pop the list empty, checking counts and per-owner order.
fn storm_unroll<S>(node_cnt: usize, iters: usize)
where
    S: Default + Send + Sync,
    for<'a> S: CaS<Option<Arc<LLNodeCnt<'a>>>>,
{
    // Serialize with the other multi-threaded tests in this file.
    let _lock = lock();
    #[cfg(not(miri))]
    let cpus = num_cpus::get();
    #[cfg(miri)]
    let cpus = 2;
    let barr = Barrier::new(PanicMode::Poison);
    // Total nodes popped in the current round, across all threads.
    let global_cnt = AtomicUsize::new(0);
    // We plan to create this many nodes during the whole test.
    let live_cnt = AtomicUsize::new(cpus * node_cnt * iters);
    let head = ArcSwapAny::<_, S>::from(None);
    thread::scope(|scope| {
        for thread in 0..cpus {
            // Borrow these instead of moving.
            let head = &head;
            let mut barr = barr.clone();
            let global_cnt = &global_cnt;
            let live_cnt = &live_cnt;
            scope.spawn(move |_| {
                for iter in 0..iters {
                    barr.wait();
                    // Create bunch of nodes and put them into the list.
                    for i in 0..node_cnt {
                        let mut node = Arc::new(LLNodeCnt {
                            next: None,
                            num: i,
                            owner: thread,
                            live_cnt,
                        });
                        head.rcu(|head| {
                            // Clone Option<Arc>
                            Arc::get_mut(&mut node).unwrap().next = head.clone();
                            Arc::clone(&node)
                        });
                    }
                    // One thread (the barrier leader) verifies the full list length.
                    if barr.wait().is_leader() {
                        let mut cnt = 0;
                        let mut node = head.load_full();
                        while let Some(n) = node.as_ref() {
                            cnt += 1;
                            node = n.next.clone();
                        }
                        assert_eq!(cnt, node_cnt * cpus);
                    }
                    barr.wait();
                    // Keep removing items, count how many there are and that they increase in each
                    // thread's list.
                    let mut last_seen = vec![node_cnt; cpus];
                    let mut cnt = 0;
                    while let Some(node) =
                        head.rcu(|head| head.as_ref().and_then(|h| h.next.clone()))
                    {
                        assert!(last_seen[node.owner] > node.num);
                        last_seen[node.owner] = node.num;
                        cnt += 1;
                    }
                    global_cnt.fetch_add(cnt, Ordering::Relaxed);
                    // Leader checks the round's pops add up, and resets the counter.
                    if barr.wait().is_leader() {
                        assert_eq!(node_cnt * cpus, global_cnt.swap(0, Ordering::Relaxed));
                    }
                    // All nodes of the finished rounds must have been dropped by now.
                    assert_eq!(
                        (iters - iter - 1) * node_cnt * cpus,
                        live_cnt.load(Ordering::Relaxed),
                    );
                }
            });
        }
        // Drop the original handle so only the worker clones keep the barrier alive.
        drop(barr);
    })
    .unwrap();
    // Everything got destroyed properly.
    assert_eq!(0, live_cnt.load(Ordering::Relaxed));
}
/// One writer stores increasing values while several readers concurrently load,
/// checking that values observed within a reader never go backwards and that no
/// leftover guard keeps an extra reference alive afterwards.
fn load_parallel<S>(iters: usize)
where
    S: Default + Strategy<Arc<usize>> + Send + Sync,
{
    // Serialize with the other multi-threaded tests in this file.
    let _lock = lock();
    #[cfg(not(miri))]
    let cpus = num_cpus::get();
    #[cfg(miri)]
    let cpus = 2;
    let shared = ArcSwapAny::<_, S>::from(Arc::new(0));
    thread::scope(|scope| {
        // Single writer, monotonically increasing values.
        scope.spawn(|_| {
            for i in 0..iters {
                shared.store(Arc::new(i));
            }
        });
        for _ in 0..cpus {
            scope.spawn(|_| {
                for _ in 0..iters {
                    // Hold many guards at once, then check they are mutually monotone.
                    let guards = (0..256).map(|_| shared.load()).collect::<Vec<_>>();
                    for (l, h) in guards.iter().tuple_windows() {
                        assert!(**l <= **h, "{} > {}", l, h);
                    }
                }
            });
        }
    })
    .unwrap();
    // Exactly two references remain: one inside `shared`, one in `v`.
    let v = shared.load_full();
    assert_eq!(2, Arc::strong_count(&v));
}
// Iteration counts; drastically reduced under Miri, which runs far slower.
#[cfg(not(miri))]
const ITER_SMALL: usize = 100;
#[cfg(not(miri))]
const ITER_MID: usize = 1000;
#[cfg(miri)]
const ITER_SMALL: usize = 2;
#[cfg(miri)]
const ITER_MID: usize = 5;
// Generates the full stress-test suite in a module `$name`, instantiated for
// one locking strategy `$strategy`. The `_large` variants are `#[ignore]`d by
// default because of their runtime.
macro_rules! t {
    ($name: ident, $strategy: ty) => {
        mod $name {
            use super::*;
            #[allow(deprecated)] // We use some "deprecated" testing strategies
            type Strategy = $strategy;
            #[test]
            fn storm_link_list_small() {
                storm_link_list::<Strategy>(ITER_SMALL, 5);
            }
            #[test]
            #[ignore]
            fn storm_link_list_large() {
                storm_link_list::<Strategy>(10_000, 50);
            }
            #[test]
            fn storm_unroll_small() {
                storm_unroll::<Strategy>(ITER_SMALL, 5);
            }
            #[test]
            #[ignore]
            fn storm_unroll_large() {
                storm_unroll::<Strategy>(10_000, 50);
            }
            #[test]
            fn load_parallel_small() {
                load_parallel::<Strategy>(ITER_MID);
            }
            #[test]
            #[ignore]
            fn load_parallel_large() {
                load_parallel::<Strategy>(100_000);
            }
        }
    };
}
// Instantiate the stress-test suite once per locking strategy under test.
t!(default, DefaultStrategy);
t!(independent, IndependentStrategy);
#[cfg(feature = "internal-test-strategies")]
t!(
    full_slots,
    arc_swap::strategy::test_strategies::FillFastSlots
);

View file

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"b0fe0b71bd530094d7aaf823bdfd5984fe4a2feea6fec3ae62975bcaed562d8c","Cargo.toml":"7c0f037ccc56d911f51bda22c475aa094e757baeb23e723bd2c2c02f324d117e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"86445da156ad68ea1d1f2dc49a3cef942ccc377ff56316aefe89732ded763aba","src/deque.rs":"54ef2940700056eff8f099e5efed10838cdf5c3ec1de91659ffcb19115fa3593","src/lib.rs":"b9899494f3933c041b059fd920fecb3226f5e91b06c3736b19501799289633e1","tests/fifo.rs":"3d98e0d4ca7cfddf10708b71642cf1ff05543d067ad837e48401d63cc31c0a18","tests/injector.rs":"fb054ef9fcac5f12e08b7b3451f370b96ab7589d32ef5c02e25958a473c45519","tests/lifo.rs":"57abdb3fc5920a422f785ba308b658bdc5400947532eeffb799f2395a2061549","tests/steal.rs":"cdf588cc13eeb275ef1231eb18e3245faca7a2d054fa6527bfdba2a34bc8f7bf"},"package":"613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"}

View file

@ -1,137 +0,0 @@
# Version 0.8.5
- Remove dependency on `cfg-if`. (#1072)
# Version 0.8.4
- Bump the minimum supported Rust version to 1.61. (#1037)
# Version 0.8.3
- Add `Stealer::{steal_batch_with_limit, steal_batch_with_limit_and_pop}` methods. (#903)
- Add `Injector::{steal_batch_with_limit, steal_batch_with_limit_and_pop}` methods. (#903)
# Version 0.8.2
- Bump the minimum supported Rust version to 1.38. (#877)
# Version 0.8.1
- Fix deque steal race condition. (#726)
- Add `Stealer::len` method. (#708)
# Version 0.8.0
**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details.
- Bump the minimum supported Rust version to 1.36.
- Add `Worker::len()` and `Injector::len()` methods.
- Add `std` (enabled by default) feature for forward compatibility.
# Version 0.7.4
- Fix deque steal race condition.
# Version 0.7.3
**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details.
- Stop stealing from the same deque. (#448)
- Fix unsoundness issues by adopting `MaybeUninit`. (#458)
# Version 0.7.2
**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details.
- Bump `crossbeam-epoch` to `0.8`.
- Bump `crossbeam-utils` to `0.7`.
# Version 0.7.1
**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details.
- Bump the minimum required version of `crossbeam-utils`.
# Version 0.7.0
**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details.
- Make `Worker::pop()` faster in the FIFO case.
- Replace `fifo()` and `lifo()` with `Worker::new_fifo()` and `Worker::new_lifo()`.
- Add more batched steal methods.
- Introduce `Injector<T>`, a MPMC queue.
- Rename `Steal::Data` to `Steal::Success`.
- Add `Steal::or_else()` and implement `FromIterator` for `Steal`.
- Add `#[must_use]` to `Steal`.
# Version 0.6.3
- Bump `crossbeam-epoch` to `0.7`.
# Version 0.6.2
- Update `crossbeam-utils` to `0.6`.
# Version 0.6.1
- Change a few `Relaxed` orderings to `Release` in order to fix false positives by tsan.
# Version 0.6.0
- Add `Stealer::steal_many` for batched stealing.
- Change the return type of `pop` to `Pop<T>` so that spinning can be handled manually.
# Version 0.5.2
- Update `crossbeam-utils` to `0.5.0`.
# Version 0.5.1
- Minor optimizations.
# Version 0.5.0
- Add two deque constructors : `fifo()` and `lifo()`.
- Update `rand` to `0.5.3`.
- Rename `Deque` to `Worker`.
- Return `Option<T>` from `Stealer::steal`.
- Remove methods `Deque::len` and `Stealer::len`.
- Remove method `Deque::stealer`.
- Remove method `Deque::steal`.
# Version 0.4.1
- Update `crossbeam-epoch` to `0.5.0`.
# Version 0.4.0
- Update `crossbeam-epoch` to `0.4.2`.
- Update `crossbeam-utils` to `0.4.0`.
- Require minimum Rust version 1.25.
# Version 0.3.1
- Add `Deque::capacity`.
- Add `Deque::min_capacity`.
- Add `Deque::shrink_to_fit`.
- Update `crossbeam-epoch` to `0.3.0`.
- Support Rust 1.20.
- Shrink the buffer in `Deque::push` if necessary.
# Version 0.3.0
- Update `crossbeam-epoch` to `0.4.0`.
- Drop support for Rust 1.13.
# Version 0.2.0
- Update `crossbeam-epoch` to `0.3.0`.
- Support Rust 1.13.
# Version 0.1.1
- Update `crossbeam-epoch` to `0.2.0`.
# Version 0.1.0
- First implementation of the Chase-Lev deque.

View file

@ -1,50 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.61"
name = "crossbeam-deque"
version = "0.8.5"
description = "Concurrent work-stealing deque"
homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-deque"
readme = "README.md"
keywords = [
"chase-lev",
"lock-free",
"scheduler",
"scheduling",
]
categories = [
"algorithms",
"concurrency",
"data-structures",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/crossbeam-rs/crossbeam"
[dependencies.crossbeam-epoch]
version = "0.9.17"
default-features = false
[dependencies.crossbeam-utils]
version = "0.8.18"
default-features = false
[dev-dependencies.rand]
version = "0.8"
[features]
default = ["std"]
std = [
"crossbeam-epoch/std",
"crossbeam-utils/std",
]

View file

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,27 +0,0 @@
The MIT License (MIT)
Copyright (c) 2019 The Crossbeam Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View file

@ -1,46 +0,0 @@
# Crossbeam Deque
[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)](
https://github.com/crossbeam-rs/crossbeam/actions)
[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)](
https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-deque#license)
[![Cargo](https://img.shields.io/crates/v/crossbeam-deque.svg)](
https://crates.io/crates/crossbeam-deque)
[![Documentation](https://docs.rs/crossbeam-deque/badge.svg)](
https://docs.rs/crossbeam-deque)
[![Rust 1.61+](https://img.shields.io/badge/rust-1.61+-lightgray.svg)](
https://www.rust-lang.org)
[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ)
This crate provides work-stealing deques, which are primarily intended for
building task schedulers.
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam-deque = "0.8"
```
## Compatibility
Crossbeam Deque supports stable Rust releases going back at least six months,
and every time the minimum supported Rust version is increased, a new minor
version is released. Currently, the minimum supported Rust version is 1.61.
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

File diff suppressed because it is too large Load diff

View file

@ -1,103 +0,0 @@
//! Concurrent work-stealing deques.
//!
//! These data structures are most commonly used in work-stealing schedulers. The typical setup
//! involves a number of threads, each having its own FIFO or LIFO queue (*worker*). There is also
//! one global FIFO queue (*injector*) and a list of references to *worker* queues that are able to
//! steal tasks (*stealers*).
//!
//! We spawn a new task onto the scheduler by pushing it into the *injector* queue. Each worker
//! thread waits in a loop until it finds the next task to run and then runs it. To find a task, it
//! first looks into its local *worker* queue, and then into the *injector* and *stealers*.
//!
//! # Queues
//!
//! [`Injector`] is a FIFO queue, where tasks are pushed and stolen from opposite ends. It is
//! shared among threads and is usually the entry point for new tasks.
//!
//! [`Worker`] has two constructors:
//!
//! * [`new_fifo()`] - Creates a FIFO queue, in which tasks are pushed and popped from opposite
//! ends.
//! * [`new_lifo()`] - Creates a LIFO queue, in which tasks are pushed and popped from the same
//! end.
//!
//! Each [`Worker`] is owned by a single thread and supports only push and pop operations.
//!
//! Method [`stealer()`] creates a [`Stealer`] that may be shared among threads and can only steal
//! tasks from its [`Worker`]. Tasks are stolen from the end opposite to where they get pushed.
//!
//! # Stealing
//!
//! Steal operations come in three flavors:
//!
//! 1. [`steal()`] - Steals one task.
//! 2. [`steal_batch()`] - Steals a batch of tasks and moves them into another worker.
//! 3. [`steal_batch_and_pop()`] - Steals a batch of tasks, moves them into another queue, and pops
//! one task from that worker.
//!
//! In contrast to push and pop operations, stealing can spuriously fail with [`Steal::Retry`], in
//! which case the steal operation needs to be retried.
//!
//! # Examples
//!
//! Suppose a thread in a work-stealing scheduler is idle and looking for the next task to run. To
//! find an available task, it might do the following:
//!
//! 1. Try popping one task from the local worker queue.
//! 2. Try stealing a batch of tasks from the global injector queue.
//! 3. Try stealing one task from another thread using the stealer list.
//!
//! An implementation of this work-stealing strategy:
//!
//! ```
//! use crossbeam_deque::{Injector, Stealer, Worker};
//! use std::iter;
//!
//! fn find_task<T>(
//! local: &Worker<T>,
//! global: &Injector<T>,
//! stealers: &[Stealer<T>],
//! ) -> Option<T> {
//! // Pop a task from the local queue, if not empty.
//! local.pop().or_else(|| {
//! // Otherwise, we need to look for a task elsewhere.
//! iter::repeat_with(|| {
//! // Try stealing a batch of tasks from the global queue.
//! global.steal_batch_and_pop(local)
//! // Or try stealing a task from one of the other threads.
//! .or_else(|| stealers.iter().map(|s| s.steal()).collect())
//! })
//! // Loop while no task was stolen and any steal operation needs to be retried.
//! .find(|s| !s.is_retry())
//! // Extract the stolen task, if there is one.
//! .and_then(|s| s.success())
//! })
//! }
//! ```
//!
//! [`new_fifo()`]: Worker::new_fifo
//! [`new_lifo()`]: Worker::new_lifo
//! [`stealer()`]: Worker::stealer
//! [`steal()`]: Stealer::steal
//! [`steal_batch()`]: Stealer::steal_batch
//! [`steal_batch_and_pop()`]: Stealer::steal_batch_and_pop
#![doc(test(
no_crate_inject,
attr(
deny(warnings, rust_2018_idioms),
allow(dead_code, unused_assignments, unused_variables)
)
))]
#![warn(
missing_docs,
missing_debug_implementations,
rust_2018_idioms,
unreachable_pub
)]
#![cfg_attr(not(feature = "std"), no_std)]
// The deque implementation requires `std`, so both the module and its
// re-exports are gated on the (default) `std` feature.
#[cfg(feature = "std")]
mod deque;
#[cfg(feature = "std")]
pub use crate::deque::{Injector, Steal, Stealer, Worker};

View file

@ -1,357 +0,0 @@
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use crossbeam_deque::Steal::{Empty, Success};
use crossbeam_deque::Worker;
use crossbeam_utils::thread::scope;
use rand::Rng;
#[test]
fn smoke() {
    // Single-threaded sanity check of push/pop/steal interleavings on a FIFO worker.
    let worker = Worker::new_fifo();
    let stealer = worker.stealer();

    // An empty queue yields nothing from either end.
    assert_eq!(worker.pop(), None);
    assert_eq!(stealer.steal(), Empty);

    // A lone item can be popped by the owner...
    worker.push(1);
    assert_eq!(worker.pop(), Some(1));
    assert_eq!(worker.pop(), None);
    assert_eq!(stealer.steal(), Empty);

    // ...or stolen instead.
    worker.push(2);
    assert_eq!(stealer.steal(), Success(2));
    assert_eq!(stealer.steal(), Empty);
    assert_eq!(worker.pop(), None);

    // Stealing drains a batch in FIFO order.
    for v in [3, 4, 5] {
        worker.push(v);
    }
    for v in [3, 4, 5] {
        assert_eq!(stealer.steal(), Success(v));
    }
    assert_eq!(stealer.steal(), Empty);

    // Pops and steals interleave without losing or duplicating items.
    for v in [6, 7, 8, 9] {
        worker.push(v);
    }
    assert_eq!(worker.pop(), Some(6));
    assert_eq!(stealer.steal(), Success(7));
    assert_eq!(worker.pop(), Some(8));
    assert_eq!(worker.pop(), Some(9));
    assert_eq!(worker.pop(), None);
}
#[test]
fn is_empty() {
    // Emptiness must be observable both through the worker and its stealer.
    let worker = Worker::new_fifo();
    let stealer = worker.stealer();

    // Worker's view: fill with two items, then drain via pop().
    assert!(worker.is_empty());
    worker.push(1);
    assert!(!worker.is_empty());
    worker.push(2);
    assert!(!worker.is_empty());
    let _ = worker.pop();
    assert!(!worker.is_empty());
    let _ = worker.pop();
    assert!(worker.is_empty());

    // Stealer's view: fill again, then drain via steal().
    assert!(stealer.is_empty());
    worker.push(1);
    assert!(!stealer.is_empty());
    worker.push(2);
    assert!(!stealer.is_empty());
    let _ = stealer.steal();
    assert!(!stealer.is_empty());
    let _ = stealer.steal();
    assert!(stealer.is_empty());
}
#[test]
fn spsc() {
    // Single-producer single-consumer: one thread pushes 0..STEPS, the other
    // spins on steal() and checks the values arrive in order.
    #[cfg(miri)]
    const STEPS: usize = 500;
    #[cfg(not(miri))]
    const STEPS: usize = 50_000;
    let w = Worker::new_fifo();
    let s = w.stealer();
    scope(|scope| {
        scope.spawn(|_| {
            for i in 0..STEPS {
                // Spin until the i-th value is successfully stolen (steal may
                // spuriously return Retry/Empty while the producer is behind).
                loop {
                    if let Success(v) = s.steal() {
                        assert_eq!(i, v);
                        break;
                    }
                }
            }
            assert_eq!(s.steal(), Empty);
        });
        for i in 0..STEPS {
            w.push(i);
        }
    })
    .unwrap();
}
#[test]
fn stampede() {
    // Pre-fill the queue, then have the owner and many stealers race to drain
    // it; each consumer checks the values it sees are strictly increasing.
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    let w = Worker::new_fifo();
    for i in 0..COUNT {
        // Box the value so each element also exercises heap (de)allocation.
        w.push(Box::new(i + 1));
    }
    // How many items are still in flight; consumers stop once it hits zero.
    let remaining = Arc::new(AtomicUsize::new(COUNT));
    scope(|scope| {
        for _ in 0..THREADS {
            let s = w.stealer();
            let remaining = remaining.clone();
            scope.spawn(move |_| {
                let mut last = 0;
                while remaining.load(SeqCst) > 0 {
                    if let Success(x) = s.steal() {
                        // FIFO order means each consumer sees increasing values.
                        assert!(last < *x);
                        last = *x;
                        remaining.fetch_sub(1, SeqCst);
                    }
                }
            });
        }
        // The owner drains too, with the same monotonicity check.
        let mut last = 0;
        while remaining.load(SeqCst) > 0 {
            if let Some(x) = w.pop() {
                assert!(last < *x);
                last = *x;
                remaining.fetch_sub(1, SeqCst);
            }
        }
    })
    .unwrap();
}
#[test]
fn stress() {
    // Producer pushes (or occasionally self-drains) while stealer threads mix
    // single steals, batch steals, and batch-steal-and-pop; every consumed
    // item is tallied and the total must come out to exactly COUNT.
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    let w = Worker::new_fifo();
    let done = Arc::new(AtomicBool::new(false));
    // Total number of items consumed, across all threads.
    let hits = Arc::new(AtomicUsize::new(0));
    scope(|scope| {
        for _ in 0..THREADS {
            let s = w.stealer();
            let done = done.clone();
            let hits = hits.clone();
            scope.spawn(move |_| {
                // Each stealer owns a private worker to receive stolen batches.
                let w2 = Worker::new_fifo();
                while !done.load(SeqCst) {
                    if let Success(_) = s.steal() {
                        hits.fetch_add(1, SeqCst);
                    }
                    let _ = s.steal_batch(&w2);
                    if let Success(_) = s.steal_batch_and_pop(&w2) {
                        hits.fetch_add(1, SeqCst);
                    }
                    // Drain whatever the batch steals deposited locally.
                    while w2.pop().is_some() {
                        hits.fetch_add(1, SeqCst);
                    }
                }
            });
        }
        let mut rng = rand::thread_rng();
        let mut expected = 0;
        while expected < COUNT {
            // Roughly 1 in 3 iterations the producer drains its own queue.
            if rng.gen_range(0..3) == 0 {
                while w.pop().is_some() {
                    hits.fetch_add(1, SeqCst);
                }
            } else {
                w.push(expected);
                expected += 1;
            }
        }
        // Keep draining until every pushed item has been accounted for.
        while hits.load(SeqCst) < COUNT {
            while w.pop().is_some() {
                hits.fetch_add(1, SeqCst);
            }
        }
        done.store(true, SeqCst);
    })
    .unwrap();
}
// Checks that no stealer is starved: the owner keeps pushing random bursts
// until it has popped something itself AND every stealer thread has recorded
// at least one successful steal.
#[cfg_attr(miri, ignore)] // Miri is too slow
#[test]
fn no_starvation() {
    const THREADS: usize = 8;
    const COUNT: usize = 50_000;
    let w = Worker::new_fifo();
    let done = Arc::new(AtomicBool::new(false));
    // One hit counter per stealer so starvation is detectable per thread.
    let mut all_hits = Vec::new();
    scope(|scope| {
        for _ in 0..THREADS {
            let s = w.stealer();
            let done = done.clone();
            let hits = Arc::new(AtomicUsize::new(0));
            all_hits.push(hits.clone());
            scope.spawn(move |_| {
                let w2 = Worker::new_fifo();
                while !done.load(SeqCst) {
                    if let Success(_) = s.steal() {
                        hits.fetch_add(1, SeqCst);
                    }
                    let _ = s.steal_batch(&w2);
                    if let Success(_) = s.steal_batch_and_pop(&w2) {
                        hits.fetch_add(1, SeqCst);
                    }
                    while w2.pop().is_some() {
                        hits.fetch_add(1, SeqCst);
                    }
                }
            });
        }
        let mut rng = rand::thread_rng();
        let mut my_hits = 0;
        loop {
            for i in 0..rng.gen_range(0..COUNT) {
                // Once the owner has popped something (`my_hits > 0`) it only
                // pushes, feeding the stealers until all have succeeded.
                if rng.gen_range(0..3) == 0 && my_hits == 0 {
                    while w.pop().is_some() {
                        my_hits += 1;
                    }
                } else {
                    w.push(i);
                }
            }
            if my_hits > 0 && all_hits.iter().all(|h| h.load(SeqCst) > 0) {
                break;
            }
        }
        done.store(true, SeqCst);
    })
    .unwrap();
}
// Verifies drop accounting for a FIFO worker: items consumed inside the scope
// are dropped there, and the items still queued are dropped exactly once when
// the worker itself is dropped.
#[test]
fn destructors() {
    #[cfg(miri)]
    const THREADS: usize = 2;
    #[cfg(not(miri))]
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    #[cfg(miri)]
    const STEPS: usize = 100;
    #[cfg(not(miri))]
    const STEPS: usize = 1000;
    // Records its index into the shared vector when dropped.
    struct Elem(usize, Arc<Mutex<Vec<usize>>>);
    impl Drop for Elem {
        fn drop(&mut self) {
            self.1.lock().unwrap().push(self.0);
        }
    }
    let w = Worker::new_fifo();
    let dropped = Arc::new(Mutex::new(Vec::new()));
    let remaining = Arc::new(AtomicUsize::new(COUNT));
    for i in 0..COUNT {
        w.push(Elem(i, dropped.clone()));
    }
    scope(|scope| {
        for _ in 0..THREADS {
            let remaining = remaining.clone();
            let s = w.stealer();
            scope.spawn(move |_| {
                let w2 = Worker::new_fifo();
                let mut cnt = 0;
                // Each thread consumes only a bounded number of items so some
                // always remain queued after the scope ends.
                while cnt < STEPS {
                    if let Success(_) = s.steal() {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                    let _ = s.steal_batch(&w2);
                    if let Success(_) = s.steal_batch_and_pop(&w2) {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                    while w2.pop().is_some() {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                }
            });
        }
        for _ in 0..STEPS {
            if w.pop().is_some() {
                remaining.fetch_sub(1, SeqCst);
            }
        }
    })
    .unwrap();
    let rem = remaining.load(SeqCst);
    assert!(rem > 0);
    {
        // Everything consumed so far must have been dropped already.
        let mut v = dropped.lock().unwrap();
        assert_eq!(v.len(), COUNT - rem);
        v.clear();
    }
    drop(w);
    {
        // Dropping the worker drops exactly the `rem` queued items; their
        // sorted indices must form one consecutive run.
        let mut v = dropped.lock().unwrap();
        assert_eq!(v.len(), rem);
        v.sort_unstable();
        for pair in v.windows(2) {
            assert_eq!(pair[0] + 1, pair[1]);
        }
    }
}

View file

@ -1,375 +0,0 @@
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use crossbeam_deque::Steal::{Empty, Success};
use crossbeam_deque::{Injector, Worker};
use crossbeam_utils::thread::scope;
use rand::Rng;
// Basic sanity check for `Injector`: pushed items are stolen back in
// insertion order, and an exhausted queue reports `Empty`.
#[test]
fn smoke() {
    let injector = Injector::new();
    assert_eq!(injector.steal(), Empty);
    for item in 1..=2 {
        injector.push(item);
    }
    for expected in 1..=2 {
        assert_eq!(injector.steal(), Success(expected));
    }
    assert_eq!(injector.steal(), Empty);
    injector.push(3);
    assert_eq!(injector.steal(), Success(3));
    assert_eq!(injector.steal(), Empty);
}
// `Injector::is_empty` must flip to true exactly when the last item leaves.
#[test]
fn is_empty() {
    let injector = Injector::new();
    assert!(injector.is_empty());

    // Two items in: non-empty until both are stolen.
    injector.push(1);
    assert!(!injector.is_empty());
    injector.push(2);
    assert!(!injector.is_empty());
    let _ = injector.steal();
    assert!(!injector.is_empty());
    let _ = injector.steal();
    assert!(injector.is_empty());

    // One more round trip.
    injector.push(3);
    assert!(!injector.is_empty());
    let _ = injector.steal();
    assert!(injector.is_empty());
}
// Single-producer/single-consumer over an `Injector`: the consumer busy-waits
// for each value in push order, then observes `Empty`.
#[test]
fn spsc() {
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 100_000;
    let q = Injector::new();
    scope(|scope| {
        scope.spawn(|_| {
            for i in 0..COUNT {
                loop {
                    if let Success(v) = q.steal() {
                        // Injector is FIFO: values arrive in push order.
                        assert_eq!(i, v);
                        break;
                    }
                    // Let Miri's scheduler make progress during the busy wait.
                    #[cfg(miri)]
                    std::hint::spin_loop();
                }
            }
            assert_eq!(q.steal(), Empty);
        });
        for i in 0..COUNT {
            q.push(i);
        }
    })
    .unwrap();
}
// Multi-producer/multi-consumer: THREADS producers each push 0..COUNT and
// THREADS consumers each steal COUNT items; afterwards every value must have
// been observed exactly THREADS times.
#[test]
fn mpmc() {
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 25_000;
    const THREADS: usize = 4;
    let q = Injector::new();
    // One per-value counter; stolen value `n` increments slot `n`.
    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
    scope(|scope| {
        for _ in 0..THREADS {
            scope.spawn(|_| {
                for i in 0..COUNT {
                    q.push(i);
                }
            });
        }
        for _ in 0..THREADS {
            scope.spawn(|_| {
                for _ in 0..COUNT {
                    loop {
                        if let Success(n) = q.steal() {
                            v[n].fetch_add(1, SeqCst);
                            break;
                        }
                        // Let Miri's scheduler make progress during the busy wait.
                        #[cfg(miri)]
                        std::hint::spin_loop();
                    }
                }
            });
        }
    })
    .unwrap();
    for c in v {
        assert_eq!(c.load(SeqCst), THREADS);
    }
}
// Pre-fills an `Injector` with boxed values 1..=COUNT, then lets THREADS
// spawned stealers plus the main thread race to drain it; every consumer must
// observe strictly increasing values.
#[test]
fn stampede() {
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    let q = Injector::new();
    for i in 0..COUNT {
        // Values start at 1 so `last = 0` is smaller than every item.
        q.push(Box::new(i + 1));
    }
    let remaining = Arc::new(AtomicUsize::new(COUNT));
    scope(|scope| {
        for _ in 0..THREADS {
            let remaining = remaining.clone();
            let q = &q;
            scope.spawn(move |_| {
                let mut last = 0;
                while remaining.load(SeqCst) > 0 {
                    if let Success(x) = q.steal() {
                        // Each consumer sees its values in increasing order.
                        assert!(last < *x);
                        last = *x;
                        remaining.fetch_sub(1, SeqCst);
                    }
                }
            });
        }
        let mut last = 0;
        while remaining.load(SeqCst) > 0 {
            if let Success(x) = q.steal() {
                assert!(last < *x);
                last = *x;
                remaining.fetch_sub(1, SeqCst);
            }
        }
    })
    .unwrap();
}
// Hammers an `Injector` with concurrent steal / steal_batch /
// steal_batch_and_pop while the main thread randomly pushes or drains; the
// shared `hits` counter must eventually account for every pushed item.
#[test]
fn stress() {
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    let q = Injector::new();
    let done = Arc::new(AtomicBool::new(false));
    let hits = Arc::new(AtomicUsize::new(0));
    scope(|scope| {
        for _ in 0..THREADS {
            let done = done.clone();
            let hits = hits.clone();
            let q = &q;
            scope.spawn(move |_| {
                // Each thread drains batches into its own local worker.
                let w2 = Worker::new_fifo();
                while !done.load(SeqCst) {
                    if let Success(_) = q.steal() {
                        hits.fetch_add(1, SeqCst);
                    }
                    // Items batch-moved into `w2` are only counted when popped below.
                    let _ = q.steal_batch(&w2);
                    if let Success(_) = q.steal_batch_and_pop(&w2) {
                        hits.fetch_add(1, SeqCst);
                    }
                    while w2.pop().is_some() {
                        hits.fetch_add(1, SeqCst);
                    }
                }
            });
        }
        let mut rng = rand::thread_rng();
        let mut expected = 0;
        while expected < COUNT {
            if rng.gen_range(0..3) == 0 {
                // Occasionally the main thread drains the queue itself.
                while let Success(_) = q.steal() {
                    hits.fetch_add(1, SeqCst);
                }
            } else {
                q.push(expected);
                expected += 1;
            }
        }
        // Keep draining until every pushed item has been counted somewhere.
        while hits.load(SeqCst) < COUNT {
            while let Success(_) = q.steal() {
                hits.fetch_add(1, SeqCst);
            }
        }
        done.store(true, SeqCst);
    })
    .unwrap();
}
// Checks that no consumer of the `Injector` is starved: the main thread keeps
// pushing random bursts until it has stolen something itself AND every worker
// thread has recorded at least one successful steal.
#[cfg_attr(miri, ignore)] // Miri is too slow
#[test]
fn no_starvation() {
    const THREADS: usize = 8;
    const COUNT: usize = 50_000;
    let q = Injector::new();
    let done = Arc::new(AtomicBool::new(false));
    // One hit counter per thread so starvation is detectable per thread.
    let mut all_hits = Vec::new();
    scope(|scope| {
        for _ in 0..THREADS {
            let done = done.clone();
            let hits = Arc::new(AtomicUsize::new(0));
            all_hits.push(hits.clone());
            let q = &q;
            scope.spawn(move |_| {
                let w2 = Worker::new_fifo();
                while !done.load(SeqCst) {
                    if let Success(_) = q.steal() {
                        hits.fetch_add(1, SeqCst);
                    }
                    let _ = q.steal_batch(&w2);
                    if let Success(_) = q.steal_batch_and_pop(&w2) {
                        hits.fetch_add(1, SeqCst);
                    }
                    while w2.pop().is_some() {
                        hits.fetch_add(1, SeqCst);
                    }
                }
            });
        }
        let mut rng = rand::thread_rng();
        let mut my_hits = 0;
        loop {
            for i in 0..rng.gen_range(0..COUNT) {
                // Once the main thread has stolen something it only pushes,
                // feeding the workers until all of them have succeeded.
                if rng.gen_range(0..3) == 0 && my_hits == 0 {
                    while let Success(_) = q.steal() {
                        my_hits += 1;
                    }
                } else {
                    q.push(i);
                }
            }
            if my_hits > 0 && all_hits.iter().all(|h| h.load(SeqCst) > 0) {
                break;
            }
        }
        done.store(true, SeqCst);
    })
    .unwrap();
}
// Verifies drop accounting for `Injector`: items consumed inside the scope
// are dropped there, and the items still queued are dropped exactly once when
// the injector itself is dropped.
#[test]
fn destructors() {
    #[cfg(miri)]
    const THREADS: usize = 2;
    #[cfg(not(miri))]
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    #[cfg(miri)]
    const STEPS: usize = 100;
    #[cfg(not(miri))]
    const STEPS: usize = 1000;
    // Records its index into the shared vector when dropped.
    struct Elem(usize, Arc<Mutex<Vec<usize>>>);
    impl Drop for Elem {
        fn drop(&mut self) {
            self.1.lock().unwrap().push(self.0);
        }
    }
    let q = Injector::new();
    let dropped = Arc::new(Mutex::new(Vec::new()));
    let remaining = Arc::new(AtomicUsize::new(COUNT));
    for i in 0..COUNT {
        q.push(Elem(i, dropped.clone()));
    }
    scope(|scope| {
        for _ in 0..THREADS {
            let remaining = remaining.clone();
            let q = &q;
            scope.spawn(move |_| {
                let w2 = Worker::new_fifo();
                let mut cnt = 0;
                // Each thread consumes only a bounded number of items so some
                // always remain queued after the scope ends.
                while cnt < STEPS {
                    if let Success(_) = q.steal() {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                    let _ = q.steal_batch(&w2);
                    if let Success(_) = q.steal_batch_and_pop(&w2) {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                    while w2.pop().is_some() {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                }
            });
        }
        for _ in 0..STEPS {
            if let Success(_) = q.steal() {
                remaining.fetch_sub(1, SeqCst);
            }
        }
    })
    .unwrap();
    let rem = remaining.load(SeqCst);
    assert!(rem > 0);
    {
        // Everything consumed so far must have been dropped already.
        let mut v = dropped.lock().unwrap();
        assert_eq!(v.len(), COUNT - rem);
        v.clear();
    }
    drop(q);
    {
        // Dropping the injector drops exactly the `rem` queued items; their
        // sorted indices must form one consecutive run.
        let mut v = dropped.lock().unwrap();
        assert_eq!(v.len(), rem);
        v.sort_unstable();
        for pair in v.windows(2) {
            assert_eq!(pair[0] + 1, pair[1]);
        }
    }
}

View file

@ -1,359 +0,0 @@
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use crossbeam_deque::Steal::{Empty, Success};
use crossbeam_deque::Worker;
use crossbeam_utils::thread::scope;
use rand::Rng;
// Basic sanity check for a LIFO worker: the owner pops newest-first while a
// stealer takes oldest-first, and empty states are reported correctly.
#[test]
fn smoke() {
    let worker = Worker::new_lifo();
    let stealer = worker.stealer();
    assert_eq!(worker.pop(), None);
    assert_eq!(stealer.steal(), Empty);

    // Single item round-trips through the owner.
    worker.push(1);
    assert_eq!(worker.pop(), Some(1));
    assert_eq!(worker.pop(), None);
    assert_eq!(stealer.steal(), Empty);

    // Single item round-trips through the stealer.
    worker.push(2);
    assert_eq!(stealer.steal(), Success(2));
    assert_eq!(stealer.steal(), Empty);
    assert_eq!(worker.pop(), None);

    // The stealer drains multiple items in insertion order.
    for value in 3..=5 {
        worker.push(value);
    }
    for expected in 3..=5 {
        assert_eq!(stealer.steal(), Success(expected));
    }
    assert_eq!(stealer.steal(), Empty);

    // Mixed draining: the owner pops newest-first, the stealer oldest-first.
    for value in 6..=9 {
        worker.push(value);
    }
    assert_eq!(worker.pop(), Some(9));
    assert_eq!(stealer.steal(), Success(6));
    assert_eq!(worker.pop(), Some(8));
    assert_eq!(worker.pop(), Some(7));
    assert_eq!(worker.pop(), None);
}
// Both the LIFO worker and its stealer must report emptiness exactly when the
// last item leaves, via either `pop` or `steal`.
#[test]
fn is_empty() {
    let worker = Worker::new_lifo();
    let stealer = worker.stealer();
    assert!(worker.is_empty());

    // Drain via the owner's `pop`.
    worker.push(1);
    assert!(!worker.is_empty());
    worker.push(2);
    assert!(!worker.is_empty());
    let _ = worker.pop();
    assert!(!worker.is_empty());
    let _ = worker.pop();
    assert!(worker.is_empty());

    // Drain via the stealer's `steal`.
    assert!(stealer.is_empty());
    worker.push(1);
    assert!(!stealer.is_empty());
    worker.push(2);
    assert!(!stealer.is_empty());
    let _ = stealer.steal();
    assert!(!stealer.is_empty());
    let _ = stealer.steal();
    assert!(stealer.is_empty());
}
// Single-producer/single-consumer over a LIFO worker: the stealer busy-waits
// for each value in push order (steals take the oldest item), then observes
// `Empty`.
#[test]
fn spsc() {
    #[cfg(miri)]
    const STEPS: usize = 500;
    #[cfg(not(miri))]
    const STEPS: usize = 50_000;
    let w = Worker::new_lifo();
    let s = w.stealer();
    scope(|scope| {
        scope.spawn(|_| {
            for i in 0..STEPS {
                loop {
                    if let Success(v) = s.steal() {
                        assert_eq!(i, v);
                        break;
                    }
                    // Let Miri's scheduler make progress during the busy wait.
                    #[cfg(miri)]
                    std::hint::spin_loop();
                }
            }
            assert_eq!(s.steal(), Empty);
        });
        for i in 0..STEPS {
            w.push(i);
        }
    })
    .unwrap();
}
// Pre-fills a LIFO worker with boxed values 1..=COUNT, then lets THREADS
// stealers and the owner race to drain it. Stealers see increasing values;
// the owner pops decreasing values (LIFO), seeded with `COUNT + 1`.
#[test]
fn stampede() {
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    let w = Worker::new_lifo();
    for i in 0..COUNT {
        w.push(Box::new(i + 1));
    }
    let remaining = Arc::new(AtomicUsize::new(COUNT));
    scope(|scope| {
        for _ in 0..THREADS {
            let s = w.stealer();
            let remaining = remaining.clone();
            scope.spawn(move |_| {
                let mut last = 0;
                while remaining.load(SeqCst) > 0 {
                    if let Success(x) = s.steal() {
                        // Each stealer sees its values in increasing order.
                        assert!(last < *x);
                        last = *x;
                        remaining.fetch_sub(1, SeqCst);
                    }
                }
            });
        }
        // `COUNT + 1` is larger than every item, so the decreasing check holds.
        let mut last = COUNT + 1;
        while remaining.load(SeqCst) > 0 {
            if let Some(x) = w.pop() {
                assert!(last > *x);
                last = *x;
                remaining.fetch_sub(1, SeqCst);
            }
        }
    })
    .unwrap();
}
// Hammers a LIFO worker with concurrent steal / steal_batch /
// steal_batch_and_pop while the owner randomly pushes or drains; the shared
// `hits` counter must eventually account for every pushed item.
#[test]
fn stress() {
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    let w = Worker::new_lifo();
    let done = Arc::new(AtomicBool::new(false));
    let hits = Arc::new(AtomicUsize::new(0));
    scope(|scope| {
        for _ in 0..THREADS {
            let s = w.stealer();
            let done = done.clone();
            let hits = hits.clone();
            scope.spawn(move |_| {
                // Each stealer drains batches into its own local worker.
                let w2 = Worker::new_lifo();
                while !done.load(SeqCst) {
                    if let Success(_) = s.steal() {
                        hits.fetch_add(1, SeqCst);
                    }
                    // Items batch-moved into `w2` are only counted when popped below.
                    let _ = s.steal_batch(&w2);
                    if let Success(_) = s.steal_batch_and_pop(&w2) {
                        hits.fetch_add(1, SeqCst);
                    }
                    while w2.pop().is_some() {
                        hits.fetch_add(1, SeqCst);
                    }
                }
            });
        }
        let mut rng = rand::thread_rng();
        let mut expected = 0;
        while expected < COUNT {
            if rng.gen_range(0..3) == 0 {
                // Occasionally the owner drains its own queue.
                while w.pop().is_some() {
                    hits.fetch_add(1, SeqCst);
                }
            } else {
                w.push(expected);
                expected += 1;
            }
        }
        // Keep draining until every pushed item has been counted somewhere.
        while hits.load(SeqCst) < COUNT {
            while w.pop().is_some() {
                hits.fetch_add(1, SeqCst);
            }
        }
        done.store(true, SeqCst);
    })
    .unwrap();
}
// Checks that no stealer of a LIFO worker is starved: the owner keeps pushing
// random bursts until it has popped something itself AND every stealer thread
// has recorded at least one successful steal.
#[cfg_attr(miri, ignore)] // Miri is too slow
#[test]
fn no_starvation() {
    const THREADS: usize = 8;
    const COUNT: usize = 50_000;
    let w = Worker::new_lifo();
    let done = Arc::new(AtomicBool::new(false));
    // One hit counter per stealer so starvation is detectable per thread.
    let mut all_hits = Vec::new();
    scope(|scope| {
        for _ in 0..THREADS {
            let s = w.stealer();
            let done = done.clone();
            let hits = Arc::new(AtomicUsize::new(0));
            all_hits.push(hits.clone());
            scope.spawn(move |_| {
                let w2 = Worker::new_lifo();
                while !done.load(SeqCst) {
                    if let Success(_) = s.steal() {
                        hits.fetch_add(1, SeqCst);
                    }
                    let _ = s.steal_batch(&w2);
                    if let Success(_) = s.steal_batch_and_pop(&w2) {
                        hits.fetch_add(1, SeqCst);
                    }
                    while w2.pop().is_some() {
                        hits.fetch_add(1, SeqCst);
                    }
                }
            });
        }
        let mut rng = rand::thread_rng();
        let mut my_hits = 0;
        loop {
            for i in 0..rng.gen_range(0..COUNT) {
                // Once the owner has popped something (`my_hits > 0`) it only
                // pushes, feeding the stealers until all have succeeded.
                if rng.gen_range(0..3) == 0 && my_hits == 0 {
                    while w.pop().is_some() {
                        my_hits += 1;
                    }
                } else {
                    w.push(i);
                }
            }
            if my_hits > 0 && all_hits.iter().all(|h| h.load(SeqCst) > 0) {
                break;
            }
        }
        done.store(true, SeqCst);
    })
    .unwrap();
}
// Verifies drop accounting for a LIFO worker: items consumed inside the scope
// are dropped there, and the items still queued are dropped exactly once when
// the worker itself is dropped.
#[test]
fn destructors() {
    #[cfg(miri)]
    const THREADS: usize = 2;
    #[cfg(not(miri))]
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 50_000;
    #[cfg(miri)]
    const STEPS: usize = 100;
    #[cfg(not(miri))]
    const STEPS: usize = 1000;
    // Records its index into the shared vector when dropped.
    struct Elem(usize, Arc<Mutex<Vec<usize>>>);
    impl Drop for Elem {
        fn drop(&mut self) {
            self.1.lock().unwrap().push(self.0);
        }
    }
    let w = Worker::new_lifo();
    let dropped = Arc::new(Mutex::new(Vec::new()));
    let remaining = Arc::new(AtomicUsize::new(COUNT));
    for i in 0..COUNT {
        w.push(Elem(i, dropped.clone()));
    }
    scope(|scope| {
        for _ in 0..THREADS {
            let remaining = remaining.clone();
            let s = w.stealer();
            scope.spawn(move |_| {
                let w2 = Worker::new_lifo();
                let mut cnt = 0;
                // Each thread consumes only a bounded number of items so some
                // always remain queued after the scope ends.
                while cnt < STEPS {
                    if let Success(_) = s.steal() {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                    let _ = s.steal_batch(&w2);
                    if let Success(_) = s.steal_batch_and_pop(&w2) {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                    while w2.pop().is_some() {
                        cnt += 1;
                        remaining.fetch_sub(1, SeqCst);
                    }
                }
            });
        }
        for _ in 0..STEPS {
            if w.pop().is_some() {
                remaining.fetch_sub(1, SeqCst);
            }
        }
    })
    .unwrap();
    let rem = remaining.load(SeqCst);
    assert!(rem > 0);
    {
        // Everything consumed so far must have been dropped already.
        let mut v = dropped.lock().unwrap();
        assert_eq!(v.len(), COUNT - rem);
        v.clear();
    }
    drop(w);
    {
        // Dropping the worker drops exactly the `rem` queued items; their
        // sorted indices must form one consecutive run.
        let mut v = dropped.lock().unwrap();
        assert_eq!(v.len(), rem);
        v.sort_unstable();
        for pair in v.windows(2) {
            assert_eq!(pair[0] + 1, pair[1]);
        }
    }
}

View file

@ -1,212 +0,0 @@
use crossbeam_deque::Steal::Success;
use crossbeam_deque::{Injector, Worker};
// A stealer drains a FIFO worker's items in insertion order.
#[test]
fn steal_fifo() {
    let worker = Worker::new_fifo();
    for item in 1..=3 {
        worker.push(item);
    }
    let stealer = worker.stealer();
    for expected in 1..=3 {
        assert_eq!(stealer.steal(), Success(expected));
    }
}
// A stealer drains a LIFO worker's items in insertion order as well — the
// expected values here match the FIFO case exactly.
#[test]
fn steal_lifo() {
    let worker = Worker::new_lifo();
    for item in 1..=3 {
        worker.push(item);
    }
    let stealer = worker.stealer();
    for expected in 1..=3 {
        assert_eq!(stealer.steal(), Success(expected));
    }
}
// Stealing directly from an `Injector` yields items in insertion order.
#[test]
fn steal_injector() {
    let injector = Injector::new();
    for item in 1..=3 {
        injector.push(item);
    }
    for expected in 1..=3 {
        assert_eq!(injector.steal(), Success(expected));
    }
}
// Batch-steal from a FIFO source into a FIFO destination: the destination
// pops the moved items in their original order.
#[test]
fn steal_batch_fifo_fifo() {
    let src = Worker::new_fifo();
    for item in 1..=4 {
        src.push(item);
    }
    let stealer = src.stealer();
    let dest = Worker::new_fifo();
    assert_eq!(stealer.steal_batch(&dest), Success(()));
    assert_eq!(dest.pop(), Some(1));
    assert_eq!(dest.pop(), Some(2));
}
// Batch-steal from a LIFO source into a LIFO destination: the destination
// pops the moved items newest-first (2 before 1).
#[test]
fn steal_batch_lifo_lifo() {
    let src = Worker::new_lifo();
    for item in 1..=4 {
        src.push(item);
    }
    let stealer = src.stealer();
    let dest = Worker::new_lifo();
    assert_eq!(stealer.steal_batch(&dest), Success(()));
    assert_eq!(dest.pop(), Some(2));
    assert_eq!(dest.pop(), Some(1));
}
// Batch-steal from a FIFO source into a LIFO destination: the destination
// pops 1 then 2.
#[test]
fn steal_batch_fifo_lifo() {
    let src = Worker::new_fifo();
    for item in 1..=4 {
        src.push(item);
    }
    let stealer = src.stealer();
    let dest = Worker::new_lifo();
    assert_eq!(stealer.steal_batch(&dest), Success(()));
    assert_eq!(dest.pop(), Some(1));
    assert_eq!(dest.pop(), Some(2));
}
// Batch-steal from a LIFO source into a FIFO destination: the destination
// pops 2 then 1.
#[test]
fn steal_batch_lifo_fifo() {
    let src = Worker::new_lifo();
    for item in 1..=4 {
        src.push(item);
    }
    let stealer = src.stealer();
    let dest = Worker::new_fifo();
    assert_eq!(stealer.steal_batch(&dest), Success(()));
    assert_eq!(dest.pop(), Some(2));
    assert_eq!(dest.pop(), Some(1));
}
// Batch-steal from an `Injector` into a FIFO worker: items come out in
// insertion order.
#[test]
fn steal_batch_injector_fifo() {
    let injector = Injector::new();
    for item in 1..=4 {
        injector.push(item);
    }
    let dest = Worker::new_fifo();
    assert_eq!(injector.steal_batch(&dest), Success(()));
    assert_eq!(dest.pop(), Some(1));
    assert_eq!(dest.pop(), Some(2));
}
// Batch-steal from an `Injector` into a LIFO worker: items still come out in
// insertion order here.
#[test]
fn steal_batch_injector_lifo() {
    let injector = Injector::new();
    for item in 1..=4 {
        injector.push(item);
    }
    let dest = Worker::new_lifo();
    assert_eq!(injector.steal_batch(&dest), Success(()));
    assert_eq!(dest.pop(), Some(1));
    assert_eq!(dest.pop(), Some(2));
}
// steal_batch_and_pop, FIFO → FIFO: returns the first moved item (1) and
// leaves the rest poppable in order.
#[test]
fn steal_batch_and_pop_fifo_fifo() {
    let src = Worker::new_fifo();
    for item in 1..=6 {
        src.push(item);
    }
    let stealer = src.stealer();
    let dest = Worker::new_fifo();
    assert_eq!(stealer.steal_batch_and_pop(&dest), Success(1));
    assert_eq!(dest.pop(), Some(2));
    assert_eq!(dest.pop(), Some(3));
}
// steal_batch_and_pop, LIFO → LIFO: returns 3 and pops the remainder
// newest-first (2 then 1).
#[test]
fn steal_batch_and_pop_lifo_lifo() {
    let src = Worker::new_lifo();
    for item in 1..=6 {
        src.push(item);
    }
    let stealer = src.stealer();
    let dest = Worker::new_lifo();
    assert_eq!(stealer.steal_batch_and_pop(&dest), Success(3));
    assert_eq!(dest.pop(), Some(2));
    assert_eq!(dest.pop(), Some(1));
}
// steal_batch_and_pop, FIFO → LIFO: returns 1, then pops 2 and 3.
#[test]
fn steal_batch_and_pop_fifo_lifo() {
    let src = Worker::new_fifo();
    for item in 1..=6 {
        src.push(item);
    }
    let stealer = src.stealer();
    let dest = Worker::new_lifo();
    assert_eq!(stealer.steal_batch_and_pop(&dest), Success(1));
    assert_eq!(dest.pop(), Some(2));
    assert_eq!(dest.pop(), Some(3));
}
// steal_batch_and_pop, LIFO → FIFO: returns 3, then pops 2 and 1.
#[test]
fn steal_batch_and_pop_lifo_fifo() {
    let src = Worker::new_lifo();
    for item in 1..=6 {
        src.push(item);
    }
    let stealer = src.stealer();
    let dest = Worker::new_fifo();
    assert_eq!(stealer.steal_batch_and_pop(&dest), Success(3));
    assert_eq!(dest.pop(), Some(2));
    assert_eq!(dest.pop(), Some(1));
}
// steal_batch_and_pop, Injector → FIFO: returns 1, then pops 2 and 3.
#[test]
fn steal_batch_and_pop_injector_fifo() {
    let injector = Injector::new();
    for item in 1..=6 {
        injector.push(item);
    }
    let dest = Worker::new_fifo();
    assert_eq!(injector.steal_batch_and_pop(&dest), Success(1));
    assert_eq!(dest.pop(), Some(2));
    assert_eq!(dest.pop(), Some(3));
}
// steal_batch_and_pop, Injector → LIFO: returns 1, then pops 2 and 3.
#[test]
fn steal_batch_and_pop_injector_lifo() {
    let injector = Injector::new();
    for item in 1..=6 {
        injector.push(item);
    }
    let dest = Worker::new_lifo();
    assert_eq!(injector.steal_batch_and_pop(&dest), Success(1));
    assert_eq!(dest.pop(), Some(2));
    assert_eq!(dest.pop(), Some(3));
}

View file

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"678c7c5b4e522345076d63e8f24f7ab4dc9f6b7428ca2f665e4017f7ef24a087","Cargo.lock":"fd85f51f6b4a2dabbb41d9f96775abd21d89221fa01c154afac970c539022f17","Cargo.toml":"cfbceb820c7a1519351826839decd3ff1b1ad54ef2c4dfc4d2c9f173e4726046","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"6ba897c52496a66705df72da33dea5f6e0ce5caa87c4ff073b0faf4e05516dad","benches/defer.rs":"c330b704d96b2ad1aed29f72c37a99da534adef8cb06a3976d5f93bf567abb20","benches/flush.rs":"0389ac6c473632f0e93c962f223404cc360257f6699b4ec90b9b3be16bb6d74f","benches/pin.rs":"2f649a5153745c7930efdb32a52f9dc522f7b8cf548a251c5e2c82ee25dc3fff","examples/sanitize.rs":"a39d1635fa61e643e59192d7a63becc97ff81f03c1f4e03d38cedefb1525026a","src/atomic.rs":"48b8b02d1e0235b2d87342c13c09d778fba076f79addef32294bed5b8f67b21a","src/collector.rs":"df05c7573413a8f3ac933de7cf941d24bd0ca7341f5923dcad2f811a020c49eb","src/default.rs":"8196e9a2a7a43fdd668177585ba1d4deaec2d16a8a9532f819e4d9afd64ca73d","src/deferred.rs":"092c49e65d5f0ccad8c868b9bcaf431b580c98b7efed98c3797d82d0b9d0c471","src/epoch.rs":"e6813975198df667423c7e1911f7a0f5cb3a917e56080eecd6250d9cca7af950","src/guard.rs":"8db7a20503f55e9e29fc1cf33f99522ec0a5873683ab16638e0e55c917bfc30a","src/internal.rs":"74a15b34b235ab428ffa41cb3a01930e29e3f91e35a288f8f6e0c3c2f56e63f6","src/lib.rs":"3f81f1727c3f74114fbd2f9225a4899834fc254f1444f7c7355901c8fd755494","src/sync/list.rs":"10aa4c59845ab9ff1d8bcb6f594b70bbe23c320fa7a2b125fdf85df88b9d61e2","src/sync/mod.rs":"326e32489d467e974c441120640a8338aa55da55c24b20276075ce9053997326","src/sync/once_lock.rs":"aa8f957604d1119c4fc7038a18c14a6281230e81005f31201c099acff284ad4b","src/sync/queue.rs":"d4ad500501c52a90b6624dc31196793be09bd19e9c298d5dd7b3ae37bee6b6a8","tests/loom.rs":"db772f4478966de6ec98774ca4093171dc942da635822a0d2d3257d31188cb9b"},"package":"5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"}

View file

@ -1,204 +0,0 @@
# Version 0.9.18
- Remove dependency on `cfg-if`. (#1072)
- Remove dependency on `autocfg`. (#1071)
# Version 0.9.17
- Remove dependency on `memoffset`. (#1058)
# Version 0.9.16
- Bump the minimum supported Rust version to 1.61. (#1037)
- Improve support for targets without atomic CAS. (#1037)
- Remove build script. (#1037)
- Remove dependency on `scopeguard`. (#1045)
- Update `loom` dependency to 0.7.
# Version 0.9.15
- Update `memoffset` to 0.9. (#981)
# Version 0.9.14
- Update `memoffset` to 0.8. (#955)
# Version 0.9.13
- Fix build script bug introduced in 0.9.12. (#932)
# Version 0.9.12
**Note:** This release has been yanked due to regression fixed in 0.9.13.
- Update `memoffset` to 0.7. (#926)
- Improve support for custom targets. (#922)
# Version 0.9.11
- Remove the dependency on the `once_cell` crate to restore the MSRV. (#913)
- Work around [rust-lang#98302](https://github.com/rust-lang/rust/issues/98302), which causes compile error on windows-gnu when LTO is enabled. (#913)
# Version 0.9.10
- Bump the minimum supported Rust version to 1.38. (#877)
- Mitigate the risk of segmentation faults in buggy downstream implementations. (#879)
- Add `{Atomic, Shared}::try_into_owned` (#701)
# Version 0.9.9
- Replace lazy_static with once_cell. (#817)
# Version 0.9.8
- Make `Atomic::null()` const function at 1.61+. (#797)
# Version 0.9.7
- Fix Miri error when `-Zmiri-check-number-validity` is enabled. (#779)
# Version 0.9.6
- Add `Atomic::fetch_update`. (#706)
# Version 0.9.5
- Fix UB in `Pointable` impl of `[MaybeUninit<T>]`. (#694)
- Support targets that do not have atomic CAS on stable Rust. (#698)
- Fix breakage with nightly feature due to rust-lang/rust#84510. (#692)
# Version 0.9.4
**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details.
- Fix UB in `<[MaybeUninit<T>] as Pointable>::init` when global allocator failed allocation. (#690)
- Bump `loom` dependency to version 0.5. (#686)
# Version 0.9.3
**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details.
- Make `loom` dependency optional. (#666)
# Version 0.9.2
**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details.
- Add `Atomic::compare_exchange` and `Atomic::compare_exchange_weak`. (#628)
- Deprecate `Atomic::compare_and_set` and `Atomic::compare_and_set_weak`. Use `Atomic::compare_exchange` or `Atomic::compare_exchange_weak` instead. (#628)
- Make `const_fn` dependency optional. (#611)
- Add unstable support for `loom`. (#487)
# Version 0.9.1
**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details.
- Bump `memoffset` dependency to version 0.6. (#592)
# Version 0.9.0
**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details.
- Bump the minimum supported Rust version to 1.36.
- Support dynamically sized types.
# Version 0.8.2
- Fix bug in release (yanking 0.8.1)
# Version 0.8.1
- Bump `autocfg` dependency to version 1.0. (#460)
- Reduce stall in list iteration. (#376)
- Stop stealing from the same deque. (#448)
- Fix unsoundness issues by adopting `MaybeUninit`. (#458)
- Fix use-after-free in lock-free queue. (#466)
# Version 0.8.0
- Bump the minimum required version to 1.28.
- Fix breakage with nightly feature due to rust-lang/rust#65214.
- Make `Atomic::null()` const function at 1.31+.
- Bump `crossbeam-utils` to `0.7`.
# Version 0.7.2
- Add `Atomic::into_owned()`.
- Update `memoffset` dependency.
# Version 0.7.1
- Add `Shared::deref_mut()`.
- Add a Treiber stack to examples.
# Version 0.7.0
- Remove `Guard::clone()`.
- Bump dependencies.
# Version 0.6.1
- Update `crossbeam-utils` to `0.6`.
# Version 0.6.0
- `defer` now requires `F: Send + 'static`.
- Bump the minimum Rust version to 1.26.
- Pinning while TLS is tearing down does not fail anymore.
- Rename `Handle` to `LocalHandle`.
- Add `defer_unchecked` and `defer_destroy`.
- Remove `Clone` impl for `LocalHandle`.
# Version 0.5.2
- Update `crossbeam-utils` to `0.5`.
# Version 0.5.1
- Fix compatibility with the latest Rust nightly.
# Version 0.5.0
- Update `crossbeam-utils` to `0.4`.
- Specify the minimum Rust version to `1.25.0`.
# Version 0.4.3
- Downgrade `crossbeam-utils` to `0.3` because it was a breaking change.
# Version 0.4.2
- Expose the `Pointer` trait.
- Warn missing docs and missing debug impls.
- Update `crossbeam-utils` to `0.4`.
# Version 0.4.1
- Add `Debug` impls for `Collector`, `Handle`, and `Guard`.
- Add `load_consume` to `Atomic`.
- Rename `Collector::handle` to `Collector::register`.
- Remove the `Send` implementation for `Handle` (this was a bug). Only
`Collector`s can be shared among multiple threads, while `Handle`s and
`Guard`s must stay within the thread in which they were created.
# Version 0.4.0
- Update dependencies.
- Remove support for Rust 1.13.
# Version 0.3.0
- Add support for Rust 1.13.
- Improve documentation for CAS.
# Version 0.2.0
- Add method `Owned::into_box`.
- Fix a use-after-free bug in `Local::finalize`.
- Fix an ordering bug in `Global::push_bag`.
- Fix a bug in calculating distance between epochs.
- Remove `impl<T> Into<Box<T>> for Owned<T>`.
# Version 0.1.0
- First version of the new epoch-based GC.

View file

@ -1,457 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
dependencies = [
"memchr",
]
[[package]]
name = "cc"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"libc",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
dependencies = [
"crossbeam-utils",
"loom",
"rand",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c"
dependencies = [
"cfg-if",
"loom",
]
[[package]]
name = "generator"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5cc16584ff22b460a382b7feec54b23d2908d858152e5739a120b949293bd74e"
dependencies = [
"cc",
"libc",
"log",
"rustversion",
"windows",
]
[[package]]
name = "getrandom"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "loom"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e045d70ddfbc984eacfa964ded019534e8f6cbf36f6410aee0ed5cefa5a9175"
dependencies = [
"cfg-if",
"generator",
"scoped-tls",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
"regex-automata 0.1.10",
]
[[package]]
name = "memchr"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
dependencies = [
"overload",
"winapi",
]
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro2"
version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom",
]
[[package]]
name = "regex"
version = "1.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata 0.4.3",
"regex-syntax 0.8.2",
]
[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
"regex-syntax 0.6.29",
]
[[package]]
name = "regex-automata"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax 0.8.2",
]
[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
[[package]]
name = "rustversion"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
[[package]]
name = "scoped-tls"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
[[package]]
name = "sharded-slab"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
dependencies = [
"lazy_static",
]
[[package]]
name = "smallvec"
version = "1.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970"
[[package]]
name = "syn"
version = "2.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thread_local"
version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
dependencies = [
"cfg-if",
"once_cell",
]
[[package]]
name = "tracing"
version = "0.1.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
dependencies = [
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
dependencies = [
"once_cell",
"valuable",
]
[[package]]
name = "tracing-log"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
dependencies = [
"log",
"once_cell",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b"
dependencies = [
"matchers",
"nu-ansi-term",
"once_cell",
"regex",
"sharded-slab",
"smallvec",
"thread_local",
"tracing",
"tracing-core",
"tracing-log",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "valuable"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"

View file

@ -1,57 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.61"
name = "crossbeam-epoch"
version = "0.9.18"
description = "Epoch-based garbage collection"
homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch"
readme = "README.md"
keywords = [
"lock-free",
"rcu",
"atomic",
"garbage",
]
categories = [
"concurrency",
"memory-management",
"no-std",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/crossbeam-rs/crossbeam"
[dependencies.crossbeam-utils]
version = "0.8.18"
default-features = false
[dev-dependencies.rand]
version = "0.8"
[features]
alloc = []
default = ["std"]
loom = [
"loom-crate",
"crossbeam-utils/loom",
]
nightly = ["crossbeam-utils/nightly"]
std = [
"alloc",
"crossbeam-utils/std",
]
[target."cfg(crossbeam_loom)".dependencies.loom-crate]
version = "0.7.1"
optional = true
package = "loom"

View file

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,27 +0,0 @@
The MIT License (MIT)
Copyright (c) 2019 The Crossbeam Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View file

@ -1,53 +0,0 @@
# Crossbeam Epoch
[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)](
https://github.com/crossbeam-rs/crossbeam/actions)
[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)](
https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch#license)
[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](
https://crates.io/crates/crossbeam-epoch)
[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)](
https://docs.rs/crossbeam-epoch)
[![Rust 1.61+](https://img.shields.io/badge/rust-1.61+-lightgray.svg)](
https://www.rust-lang.org)
[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ)
This crate provides epoch-based garbage collection for building concurrent data structures.
When a thread removes an object from a concurrent data structure, other threads
may still be using pointers to it at the same time, so the object cannot be destroyed
immediately. Epoch-based GC is an efficient mechanism for deferring the destruction of
shared objects until no pointers to them can exist.
Everything in this crate except the global GC can be used in `no_std` environments, provided that
`alloc` feature is enabled.
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam-epoch = "0.9"
```
## Compatibility
Crossbeam Epoch supports stable Rust releases going back at least six months,
and every time the minimum supported Rust version is increased, a new minor
version is released. Currently, the minimum supported Rust version is 1.61.
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

View file

@ -1,69 +0,0 @@
#![feature(test)]
extern crate test;
use crossbeam_epoch::{self as epoch, Owned};
use crossbeam_utils::thread::scope;
use test::Bencher;
#[bench]
fn single_alloc_defer_free(b: &mut Bencher) {
    // Per-iteration cost of: pin, allocate one boxed value, and schedule it
    // for deferred destruction.
    b.iter(|| {
        let guard = epoch::pin();
        let shared = Owned::new(1).into_shared(&guard);
        unsafe { guard.defer_destroy(shared) };
    });
}
#[bench]
fn single_defer(b: &mut Bencher) {
    // Per-iteration cost of pinning and deferring a no-op closure.
    b.iter(|| epoch::pin().defer(move || ()));
}
#[bench]
fn multi_alloc_defer_free(b: &mut Bencher) {
    // Contended variant: 16 scoped threads each allocate and defer-destroy
    // 10_000 objects per benchmark iteration.
    const THREADS: usize = 16;
    const STEPS: usize = 10_000;
    b.iter(|| {
        scope(|s| {
            (0..THREADS).for_each(|_| {
                s.spawn(|_| {
                    (0..STEPS).for_each(|_| {
                        let guard = epoch::pin();
                        let shared = Owned::new(1).into_shared(&guard);
                        unsafe { guard.defer_destroy(shared) };
                    });
                });
            });
        })
        .unwrap();
    });
}
#[bench]
fn multi_defer(b: &mut Bencher) {
    // Contended variant: 16 scoped threads each defer 10_000 no-op closures
    // per benchmark iteration.
    const THREADS: usize = 16;
    const STEPS: usize = 10_000;
    b.iter(|| {
        scope(|s| {
            (0..THREADS).for_each(|_| {
                s.spawn(|_| {
                    (0..STEPS).for_each(|_| {
                        epoch::pin().defer(move || ());
                    });
                });
            });
        })
        .unwrap();
    });
}

View file

@ -1,52 +0,0 @@
#![feature(test)]
extern crate test;
use std::sync::Barrier;
use crossbeam_epoch as epoch;
use crossbeam_utils::thread::scope;
use test::Bencher;
#[bench]
fn single_flush(b: &mut Bencher) {
    // One thread repeatedly flushes while THREADS helper threads exist.
    // The two barriers bracket the measurement: `start` is crossed once every
    // helper has run `epoch::pin()`, and `end` releases them only after the
    // benchmark loop has finished.
    const THREADS: usize = 16;
    let start = Barrier::new(THREADS + 1);
    let end = Barrier::new(THREADS + 1);
    scope(|s| {
        for _ in 0..THREADS {
            s.spawn(|_| {
                // NOTE(review): the guard returned here is dropped
                // immediately, so the helper is not actually pinned while it
                // waits — presumably the intent is just to register each
                // thread as a participant; confirm.
                epoch::pin();
                start.wait();
                end.wait();
            });
        }
        start.wait();
        b.iter(|| epoch::pin().flush());
        end.wait();
    })
    .unwrap();
}
#[bench]
fn multi_flush(b: &mut Bencher) {
    // Contended variant: 16 scoped threads each pin and flush 10_000 times
    // per benchmark iteration.
    const THREADS: usize = 16;
    const STEPS: usize = 10_000;
    b.iter(|| {
        scope(|s| {
            (0..THREADS).for_each(|_| {
                s.spawn(|_| {
                    (0..STEPS).for_each(|_| {
                        epoch::pin().flush();
                    });
                });
            });
        })
        .unwrap();
    });
}

View file

@ -1,31 +0,0 @@
#![feature(test)]
extern crate test;
use crossbeam_epoch as epoch;
use crossbeam_utils::thread::scope;
use test::Bencher;
#[bench]
fn single_pin(b: &mut Bencher) {
    // Baseline cost of a single pin/unpin cycle on one thread.
    b.iter(|| epoch::pin());
}
#[bench]
fn multi_pin(b: &mut Bencher) {
    // Contended variant: 16 scoped threads each perform 100_000 pin/unpin
    // cycles per benchmark iteration.
    const THREADS: usize = 16;
    const STEPS: usize = 100_000;
    b.iter(|| {
        scope(|s| {
            (0..THREADS).for_each(|_| {
                s.spawn(|_| {
                    let mut remaining = STEPS;
                    while remaining > 0 {
                        epoch::pin();
                        remaining -= 1;
                    }
                });
            });
        })
        .unwrap();
    });
}

View file

@ -1,66 +0,0 @@
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_epoch::{self as epoch, Atomic, Collector, LocalHandle, Owned, Shared};
use rand::Rng;
// Stress worker for the sanitize example: for a random time slice, randomly
// either swaps out the shared atomic (deferring destruction of the old node)
// or loads it and bumps it in place, folding every observed value into `sum`.
// Returns the accumulated sum (the value itself is meaningless; the point is
// to exercise the epoch machinery under a sanitizer).
fn worker(a: Arc<Atomic<AtomicUsize>>, handle: LocalHandle) -> usize {
    let mut rng = rand::thread_rng();
    let mut sum = 0;
    // Randomly stagger thread start so the threads hit different epoch phases.
    if rng.gen() {
        thread::sleep(Duration::from_millis(1));
    }
    let timeout = Duration::from_millis(rng.gen_range(0..10));
    let now = Instant::now();
    while now.elapsed() < timeout {
        for _ in 0..100 {
            let guard = &handle.pin();
            guard.flush();
            let val = if rng.gen() {
                // Write path: publish a new node, defer-destroy the old one.
                // Reading through `p` after `defer_destroy` is still valid
                // here because this guard is pinned, which keeps the node
                // alive until the epoch advances past all pinned readers.
                let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard);
                unsafe {
                    guard.defer_destroy(p);
                    guard.flush();
                    p.deref().load(Relaxed)
                }
            } else {
                // Read path: bump the currently published counter in place.
                let p = a.load(Acquire, guard);
                unsafe { p.deref().fetch_add(sum, Relaxed) }
            };
            sum = sum.wrapping_add(val);
        }
    }
    sum
}
// Entry point of the sanitize example: repeatedly hammers one shared pointer
// with 16 worker threads so that sanitizers (ASan/TSan/Miri) can catch
// use-after-free or leaks in the epoch machinery.
fn main() {
    for _ in 0..100 {
        let collector = Collector::new();
        let a = Arc::new(Atomic::new(AtomicUsize::new(777)));
        let threads = (0..16)
            .map(|_| {
                let a = a.clone();
                let c = collector.clone();
                thread::spawn(move || worker(a, c.register()))
            })
            .collect::<Vec<_>>();
        for t in threads {
            t.join().unwrap();
        }
        // All workers have been joined, so no guard can still reference the
        // final node; using `unprotected()` here and eagerly freeing the
        // value via `into_owned()` is therefore sound.
        unsafe {
            a.swap(Shared::null(), AcqRel, epoch::unprotected())
                .into_owned();
        }
    }
}

File diff suppressed because it is too large Load diff

View file

@ -1,464 +0,0 @@
/// Epoch-based garbage collector.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::Collector;
///
/// let collector = Collector::new();
///
/// let handle = collector.register();
/// drop(collector); // `handle` still works after dropping `collector`
///
/// handle.pin().flush();
/// ```
use core::fmt;
use crate::guard::Guard;
use crate::internal::{Global, Local};
use crate::primitive::sync::Arc;
/// An epoch-based garbage collector.
pub struct Collector {
    // Shared global GC state. Handles hold their own `Arc`, which is why a
    // handle keeps working after the `Collector` itself is dropped.
    pub(crate) global: Arc<Global>,
}

// SAFETY: `Collector` only exposes the shared `Arc<Global>`.
// NOTE(review): soundness of these impls rests on `Global` supporting
// concurrent access from multiple participants — confirm in `internal.rs`.
unsafe impl Send for Collector {}
unsafe impl Sync for Collector {}

impl Default for Collector {
    fn default() -> Self {
        Self {
            global: Arc::new(Global::new()),
        }
    }
}

impl Collector {
    /// Creates a new collector.
    pub fn new() -> Self {
        Self::default()
    }

    /// Registers a new handle for the collector.
    pub fn register(&self) -> LocalHandle {
        Local::register(self)
    }
}

impl Clone for Collector {
    /// Creates another reference to the same garbage collector.
    fn clone(&self) -> Self {
        Collector {
            global: self.global.clone(),
        }
    }
}

impl fmt::Debug for Collector {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Collector { .. }")
    }
}

impl PartialEq for Collector {
    /// Checks if both handles point to the same collector.
    fn eq(&self, rhs: &Collector) -> bool {
        // Identity comparison: equal iff both share the same `Global`.
        Arc::ptr_eq(&self.global, &rhs.global)
    }
}

impl Eq for Collector {}
/// A handle to a garbage collector.
pub struct LocalHandle {
    // Raw pointer to this participant's `Local` record. NOTE(review): the
    // record's lifetime appears to be managed by `Local::register` /
    // `Local::release_handle` rather than owned here — confirm the
    // reference-counting scheme in `internal.rs`.
    pub(crate) local: *const Local,
}

impl LocalHandle {
    /// Pins the handle.
    #[inline]
    pub fn pin(&self) -> Guard {
        unsafe { (*self.local).pin() }
    }

    /// Returns `true` if the handle is pinned.
    #[inline]
    pub fn is_pinned(&self) -> bool {
        unsafe { (*self.local).is_pinned() }
    }

    /// Returns the `Collector` associated with this handle.
    #[inline]
    pub fn collector(&self) -> &Collector {
        unsafe { (*self.local).collector() }
    }
}

impl Drop for LocalHandle {
    #[inline]
    fn drop(&mut self) {
        // Notify the participant record that this handle is gone so it can
        // be unregistered.
        unsafe {
            Local::release_handle(&*self.local);
        }
    }
}

impl fmt::Debug for LocalHandle {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("LocalHandle { .. }")
    }
}
#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
use std::mem::ManuallyDrop;
use std::sync::atomic::{AtomicUsize, Ordering};
use crossbeam_utils::thread;
use crate::{Collector, Owned};
const NUM_THREADS: usize = 8;
#[test]
fn pin_reentrant() {
let collector = Collector::new();
let handle = collector.register();
drop(collector);
assert!(!handle.is_pinned());
{
let _guard = &handle.pin();
assert!(handle.is_pinned());
{
let _guard = &handle.pin();
assert!(handle.is_pinned());
}
assert!(handle.is_pinned());
}
assert!(!handle.is_pinned());
}
#[test]
fn flush_local_bag() {
let collector = Collector::new();
let handle = collector.register();
drop(collector);
for _ in 0..100 {
let guard = &handle.pin();
unsafe {
let a = Owned::new(7).into_shared(guard);
guard.defer_destroy(a);
assert!(!(*guard.local).bag.with(|b| (*b).is_empty()));
while !(*guard.local).bag.with(|b| (*b).is_empty()) {
guard.flush();
}
}
}
}
#[test]
fn garbage_buffering() {
let collector = Collector::new();
let handle = collector.register();
drop(collector);
let guard = &handle.pin();
unsafe {
for _ in 0..10 {
let a = Owned::new(7).into_shared(guard);
guard.defer_destroy(a);
}
assert!(!(*guard.local).bag.with(|b| (*b).is_empty()));
}
}
#[test]
fn pin_holds_advance() {
#[cfg(miri)]
const N: usize = 500;
#[cfg(not(miri))]
const N: usize = 500_000;
let collector = Collector::new();
thread::scope(|scope| {
for _ in 0..NUM_THREADS {
scope.spawn(|_| {
let handle = collector.register();
for _ in 0..N {
let guard = &handle.pin();
let before = collector.global.epoch.load(Ordering::Relaxed);
collector.global.collect(guard);
let after = collector.global.epoch.load(Ordering::Relaxed);
assert!(after.wrapping_sub(before) <= 2);
}
});
}
})
.unwrap();
}
#[cfg(not(crossbeam_sanitize))] // TODO: assertions failed due to `cfg(crossbeam_sanitize)` reduce `internal::MAX_OBJECTS`
#[test]
fn incremental() {
#[cfg(miri)]
const COUNT: usize = 500;
#[cfg(not(miri))]
const COUNT: usize = 100_000;
static DESTROYS: AtomicUsize = AtomicUsize::new(0);
let collector = Collector::new();
let handle = collector.register();
unsafe {
let guard = &handle.pin();
for _ in 0..COUNT {
let a = Owned::new(7i32).into_shared(guard);
guard.defer_unchecked(move || {
drop(a.into_owned());
DESTROYS.fetch_add(1, Ordering::Relaxed);
});
}
guard.flush();
}
let mut last = 0;
while last < COUNT {
let curr = DESTROYS.load(Ordering::Relaxed);
assert!(curr - last <= 1024);
last = curr;
let guard = &handle.pin();
collector.global.collect(guard);
}
assert!(DESTROYS.load(Ordering::Relaxed) == COUNT);
}
#[test]
fn buffering() {
const COUNT: usize = 10;
#[cfg(miri)]
const N: usize = 500;
#[cfg(not(miri))]
const N: usize = 100_000;
static DESTROYS: AtomicUsize = AtomicUsize::new(0);
let collector = Collector::new();
let handle = collector.register();
unsafe {
let guard = &handle.pin();
for _ in 0..COUNT {
let a = Owned::new(7i32).into_shared(guard);
guard.defer_unchecked(move || {
drop(a.into_owned());
DESTROYS.fetch_add(1, Ordering::Relaxed);
});
}
}
for _ in 0..N {
collector.global.collect(&handle.pin());
}
assert!(DESTROYS.load(Ordering::Relaxed) < COUNT);
handle.pin().flush();
while DESTROYS.load(Ordering::Relaxed) < COUNT {
let guard = &handle.pin();
collector.global.collect(guard);
}
assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
}
#[test]
fn count_drops() {
#[cfg(miri)]
const COUNT: usize = 500;
#[cfg(not(miri))]
const COUNT: usize = 100_000;
static DROPS: AtomicUsize = AtomicUsize::new(0);
struct Elem(#[allow(dead_code)] i32);
impl Drop for Elem {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::Relaxed);
}
}
let collector = Collector::new();
let handle = collector.register();
unsafe {
let guard = &handle.pin();
for _ in 0..COUNT {
let a = Owned::new(Elem(7i32)).into_shared(guard);
guard.defer_destroy(a);
}
guard.flush();
}
while DROPS.load(Ordering::Relaxed) < COUNT {
let guard = &handle.pin();
collector.global.collect(guard);
}
assert_eq!(DROPS.load(Ordering::Relaxed), COUNT);
}
#[test]
fn count_destroy() {
#[cfg(miri)]
const COUNT: usize = 500;
#[cfg(not(miri))]
const COUNT: usize = 100_000;
static DESTROYS: AtomicUsize = AtomicUsize::new(0);
let collector = Collector::new();
let handle = collector.register();
unsafe {
let guard = &handle.pin();
for _ in 0..COUNT {
let a = Owned::new(7i32).into_shared(guard);
guard.defer_unchecked(move || {
drop(a.into_owned());
DESTROYS.fetch_add(1, Ordering::Relaxed);
});
}
guard.flush();
}
while DESTROYS.load(Ordering::Relaxed) < COUNT {
let guard = &handle.pin();
collector.global.collect(guard);
}
assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
}
#[test]
fn drop_array() {
const COUNT: usize = 700;
static DROPS: AtomicUsize = AtomicUsize::new(0);
struct Elem(#[allow(dead_code)] i32);
impl Drop for Elem {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::Relaxed);
}
}
let collector = Collector::new();
let handle = collector.register();
let mut guard = handle.pin();
let mut v = Vec::with_capacity(COUNT);
for i in 0..COUNT {
v.push(Elem(i as i32));
}
{
let a = Owned::new(v).into_shared(&guard);
unsafe {
guard.defer_destroy(a);
}
guard.flush();
}
while DROPS.load(Ordering::Relaxed) < COUNT {
guard.repin();
collector.global.collect(&guard);
}
assert_eq!(DROPS.load(Ordering::Relaxed), COUNT);
}
#[test]
fn destroy_array() {
    // Smaller workload under Miri (see sibling tests).
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 100_000;
    // Incremented by `len` in a single shot when the deferred closure runs.
    static DESTROYS: AtomicUsize = AtomicUsize::new(0);

    let collector = Collector::new();
    let handle = collector.register();

    unsafe {
        let guard = &handle.pin();

        let mut v = Vec::with_capacity(COUNT);
        for i in 0..COUNT {
            v.push(i as i32);
        }

        // Disassemble the vector into raw parts, leak it via `ManuallyDrop`,
        // and defer a closure that reassembles and drops it.
        let len = v.len();
        let cap = v.capacity();
        let ptr = ManuallyDrop::new(v).as_mut_ptr();
        guard.defer_unchecked(move || {
            drop(Vec::from_raw_parts(ptr, len, cap));
            DESTROYS.fetch_add(len, Ordering::Relaxed);
        });
        guard.flush();
    }

    // Keep pinning and collecting until the deferred closure has run.
    while DESTROYS.load(Ordering::Relaxed) < COUNT {
        let guard = &handle.pin();
        collector.global.collect(guard);
    }
    assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
}
#[test]
fn stress() {
    const THREADS: usize = 8;
    #[cfg(miri)]
    const COUNT: usize = 500;
    #[cfg(not(miri))]
    const COUNT: usize = 100_000;
    // Total element drops across all threads.
    static DROPS: AtomicUsize = AtomicUsize::new(0);

    struct Elem(#[allow(dead_code)] i32);

    impl Drop for Elem {
        fn drop(&mut self) {
            DROPS.fetch_add(1, Ordering::Relaxed);
        }
    }

    let collector = Collector::new();

    // Hammer one collector from many threads, each deferring COUNT
    // destructions under short-lived pins.
    thread::scope(|scope| {
        for _ in 0..THREADS {
            scope.spawn(|_| {
                let handle = collector.register();
                for _ in 0..COUNT {
                    let guard = &handle.pin();
                    unsafe {
                        let a = Owned::new(Elem(7i32)).into_shared(guard);
                        guard.defer_destroy(a);
                    }
                }
            });
        }
    })
    .unwrap();

    // All worker threads have exited; collect until every deferred destructor
    // has run.
    let handle = collector.register();
    while DROPS.load(Ordering::Relaxed) < COUNT * THREADS {
        let guard = &handle.pin();
        collector.global.collect(guard);
    }
    assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS);
}
}

View file

@ -1,93 +0,0 @@
//! The default garbage collector.
//!
//! For each thread, a participant is lazily initialized on its first use, when the current thread
//! is registered in the default collector. If initialized, the thread's participant will get
//! destructed on thread exit, which in turn unregisters the thread.
use crate::collector::{Collector, LocalHandle};
use crate::guard::Guard;
use crate::primitive::thread_local;
#[cfg(not(crossbeam_loom))]
use crate::sync::once_lock::OnceLock;
/// Returns the process-wide default collector, lazily initializing it on the
/// first call.
fn collector() -> &'static Collector {
    #[cfg(not(crossbeam_loom))]
    {
        /// The global data for the default garbage collector.
        static COLLECTOR: OnceLock<Collector> = OnceLock::new();
        COLLECTOR.get_or_init(Collector::new)
    }
    // FIXME: loom does not currently provide the equivalent of Lazy:
    // https://github.com/tokio-rs/loom/issues/263
    #[cfg(crossbeam_loom)]
    {
        loom::lazy_static! {
            /// The global data for the default garbage collector.
            static ref COLLECTOR: Collector = Collector::new();
        }
        &COLLECTOR
    }
}
thread_local! {
    /// The per-thread participant for the default garbage collector.
    ///
    /// Registered lazily on first use; destruction at thread exit unregisters
    /// the thread (see the module docs above).
    static HANDLE: LocalHandle = collector().register();
}
/// Pins the current thread, returning a guard that witnesses the pin.
#[inline]
pub fn pin() -> Guard {
    with_handle(|local| local.pin())
}
/// Reports whether the current thread currently holds a pin.
#[inline]
pub fn is_pinned() -> bool {
    with_handle(|local| local.is_pinned())
}
/// Returns the default global collector.
pub fn default_collector() -> &'static Collector {
    // The same instance that `pin()` / `is_pinned()` operate on.
    collector()
}
/// Runs `f` with this thread's participant handle.
///
/// Fast path: use the thread-local `HANDLE`. If the TLS slot has already been
/// destructed (we are inside thread teardown), fall back to a fresh,
/// short-lived registration so the call still succeeds.
#[inline]
fn with_handle<F, R>(mut f: F) -> R
where
    F: FnMut(&LocalHandle) -> R,
{
    match HANDLE.try_with(|h| f(h)) {
        Ok(result) => result,
        Err(_) => f(&collector().register()),
    }
}
#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use crossbeam_utils::thread;

    /// Pinning from a TLS destructor that runs after `HANDLE` is gone must
    /// take the fallback path in `with_handle` instead of panicking.
    #[test]
    fn pin_while_exiting() {
        struct Foo;

        impl Drop for Foo {
            fn drop(&mut self) {
                // Pin after `HANDLE` has been dropped. This must not panic.
                super::pin();
            }
        }

        thread_local! {
            static FOO: Foo = const { Foo };
        }

        thread::scope(|scope| {
            scope.spawn(|_| {
                // Initialize `FOO` and then `HANDLE`.
                FOO.with(|_| ());
                super::pin();
                // At thread exit, `HANDLE` gets dropped first and `FOO` second.
            });
        })
        .unwrap();
    }
}

View file

@ -1,146 +0,0 @@
use alloc::boxed::Box;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ptr;
/// Number of words a piece of `Data` can hold.
///
/// Three words should be enough for the majority of cases. For example, you can fit inside it the
/// function pointer together with a fat pointer representing an object that needs to be destroyed.
const DATA_WORDS: usize = 3;

/// Some space to keep a `FnOnce()` object on the stack.
///
/// Pure raw storage: it is only ever written/read through casts in
/// `Deferred::new` and the per-type trampolines, never as `usize`s.
type Data = [usize; DATA_WORDS];
/// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap.
///
/// This is a handy way of keeping an unsized `FnOnce()` within a sized structure.
pub(crate) struct Deferred {
    /// Type-erased trampoline that reads the closure (or its `Box`) out of
    /// `data` and invokes it.
    call: unsafe fn(*mut u8),
    /// Raw storage holding either the closure itself or a `Box` of it.
    data: MaybeUninit<Data>,
    _marker: PhantomData<*mut ()>, // !Send + !Sync
}
impl fmt::Debug for Deferred {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        // Nothing meaningful to print: the payload is a type-erased closure.
        f.pad("Deferred { .. }")
    }
}
impl Deferred {
    /// A deferred call that does nothing when invoked.
    ///
    /// Used as a cheap placeholder. `data` may stay uninitialized because
    /// `no_op_call` never reads it.
    pub(crate) const NO_OP: Self = {
        fn no_op_call(_raw: *mut u8) {}
        Self {
            call: no_op_call,
            data: MaybeUninit::uninit(),
            _marker: PhantomData,
        }
    };

    /// Constructs a new `Deferred` from a `FnOnce()`.
    pub(crate) fn new<F: FnOnce()>(f: F) -> Self {
        let size = mem::size_of::<F>();
        let align = mem::align_of::<F>();
        unsafe {
            if size <= mem::size_of::<Data>() && align <= mem::align_of::<Data>() {
                // The closure fits (in both size and alignment) into the
                // inline buffer: move it in directly.
                let mut data = MaybeUninit::<Data>::uninit();
                ptr::write(data.as_mut_ptr().cast::<F>(), f);

                unsafe fn call<F: FnOnce()>(raw: *mut u8) {
                    // `raw` points at an `F` written above; read it out by
                    // value (consuming it exactly once) and invoke it.
                    let f: F = ptr::read(raw.cast::<F>());
                    f();
                }

                Deferred {
                    call: call::<F>,
                    data,
                    _marker: PhantomData,
                }
            } else {
                // Too big for the inline buffer: box the closure and store the
                // thin `Box` pointer inline instead.
                let b: Box<F> = Box::new(f);
                let mut data = MaybeUninit::<Data>::uninit();
                ptr::write(data.as_mut_ptr().cast::<Box<F>>(), b);

                unsafe fn call<F: FnOnce()>(raw: *mut u8) {
                    // It's safe to cast `raw` from `*mut u8` to `*mut Box<F>`, because `raw` is
                    // originally derived from `*mut Box<F>`.
                    let b: Box<F> = ptr::read(raw.cast::<Box<F>>());
                    (*b)();
                }

                Deferred {
                    call: call::<F>,
                    data,
                    _marker: PhantomData,
                }
            }
        }
    }

    /// Calls the function.
    #[inline]
    pub(crate) fn call(mut self) {
        // The trampoline consumes the closure stored in `data`; `Deferred` has
        // no `Drop` impl, so nothing double-frees afterwards.
        let call = self.call;
        unsafe { call(self.data.as_mut_ptr().cast::<u8>()) };
    }
}
#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::Deferred;
    use std::cell::Cell;
    use std::convert::identity;

    /// A one-word capture fits in the inline buffer (no heap allocation).
    #[test]
    fn on_stack() {
        let flag = &Cell::new(false);
        let payload = [0usize; 1];

        let deferred = Deferred::new(move || {
            let _ = identity(payload);
            flag.set(true);
        });

        assert!(!flag.get());
        deferred.call();
        assert!(flag.get());
    }

    /// A ten-word capture exceeds the three-word inline buffer, forcing the
    /// boxed path.
    #[test]
    fn on_heap() {
        let flag = &Cell::new(false);
        let payload = [0usize; 10];

        let deferred = Deferred::new(move || {
            let _ = identity(payload);
            flag.set(true);
        });

        assert!(!flag.get());
        deferred.call();
        assert!(flag.get());
    }

    /// An owned `String` capture survives the round-trip through `Deferred`.
    #[test]
    fn string() {
        let text = "hello".to_string();
        Deferred::new(move || assert_eq!(text, "hello")).call();
    }

    /// A fat pointer (boxed slice) also round-trips correctly.
    #[test]
    fn boxed_slice_i32() {
        let nums: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice();
        Deferred::new(move || assert_eq!(*nums, [2, 3, 5, 7])).call();
    }

    /// A five-word array capture exercises the boxed path with plain data.
    #[test]
    fn long_slice_usize() {
        let nums: [usize; 5] = [2, 3, 5, 7, 11];
        Deferred::new(move || assert_eq!(nums, [2, 3, 5, 7, 11])).call();
    }
}

View file

@ -1,132 +0,0 @@
//! The global epoch
//!
//! The last bit in this number is unused and is always zero. Every so often the global epoch is
//! incremented, i.e. we say it "advances". A pinned participant may advance the global epoch only
//! if all currently pinned participants have been pinned in the current epoch.
//!
//! If an object became garbage in some epoch, then we can be sure that after two advancements no
//! participant will hold a reference to it. That is the crux of safe memory reclamation.
use crate::primitive::sync::atomic::{AtomicUsize, Ordering};
/// An epoch that can be marked as pinned or unpinned.
///
/// Internally, the epoch is represented as an integer that wraps around at some unspecified point
/// and a flag that represents whether it is pinned or unpinned.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub(crate) struct Epoch {
    /// The least significant bit is set if pinned. The rest of the bits hold the epoch.
    data: usize,
}

impl Epoch {
    /// Bit mask of the "pinned" flag inside `data`.
    const PINNED_BIT: usize = 1;

    /// Returns the starting epoch in unpinned state.
    #[inline]
    pub(crate) fn starting() -> Self {
        Self::default()
    }

    /// Returns the number of epochs `self` is ahead of `rhs`.
    ///
    /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX
    /// / 2)`, so the returned distance will be in the same interval.
    pub(crate) fn wrapping_sub(self, rhs: Self) -> isize {
        // Masking only `rhs` is sufficient: any contribution of `self`'s
        // pinned bit lives in the LSB of the difference, which the arithmetic
        // right shift below discards.
        self.data.wrapping_sub(rhs.data & !Self::PINNED_BIT) as isize >> 1
    }

    /// Returns `true` if the epoch is marked as pinned.
    #[inline]
    pub(crate) fn is_pinned(self) -> bool {
        self.data & Self::PINNED_BIT != 0
    }

    /// Returns the same epoch, but marked as pinned.
    #[inline]
    pub(crate) fn pinned(self) -> Epoch {
        Epoch {
            data: self.data | Self::PINNED_BIT,
        }
    }

    /// Returns the same epoch, but marked as unpinned.
    #[inline]
    pub(crate) fn unpinned(self) -> Epoch {
        Epoch {
            data: self.data & !Self::PINNED_BIT,
        }
    }

    /// Returns the successor epoch.
    ///
    /// The returned epoch will be marked as pinned only if the previous one was as well.
    #[inline]
    pub(crate) fn successor(self) -> Epoch {
        Epoch {
            data: self.data.wrapping_add(2),
        }
    }
}

/// An atomic value that holds an `Epoch`.
#[derive(Default, Debug)]
pub(crate) struct AtomicEpoch {
    /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented
    /// using an `AtomicUsize`.
    data: AtomicUsize,
}

impl AtomicEpoch {
    /// Creates a new atomic epoch.
    #[inline]
    pub(crate) fn new(epoch: Epoch) -> Self {
        AtomicEpoch {
            data: AtomicUsize::new(epoch.data),
        }
    }

    /// Loads a value from the atomic epoch.
    #[inline]
    pub(crate) fn load(&self, ord: Ordering) -> Epoch {
        let data = self.data.load(ord);
        Epoch { data }
    }

    /// Stores a value into the atomic epoch.
    #[inline]
    pub(crate) fn store(&self, epoch: Epoch, ord: Ordering) {
        self.data.store(epoch.data, ord);
    }

    /// Stores a value into the atomic epoch if the current value is the same as `current`.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    #[inline]
    pub(crate) fn compare_exchange(
        &self,
        current: Epoch,
        new: Epoch,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Epoch, Epoch> {
        self.data
            .compare_exchange(current.data, new.data, success, failure)
            .map(|data| Epoch { data })
            .map_err(|data| Epoch { data })
    }
}

View file

@ -1,523 +0,0 @@
use core::fmt;
use core::mem;
use crate::atomic::Shared;
use crate::collector::Collector;
use crate::deferred::Deferred;
use crate::internal::Local;
/// A guard that keeps the current thread pinned.
///
/// # Pinning
///
/// The current thread is pinned by calling [`pin`], which returns a new guard:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
/// // This is not really necessary, but makes passing references to the guard a bit easier.
/// let guard = &epoch::pin();
/// ```
///
/// When a guard gets dropped, the current thread is automatically unpinned.
///
/// # Pointers on the stack
///
/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
/// For example:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// // Create a heap-allocated number.
/// let a = Atomic::new(777);
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Load the heap-allocated object and create pointer `p` on the stack.
/// let p = a.load(SeqCst, guard);
///
/// // Dereference the pointer and print the value:
/// if let Some(num) = unsafe { p.as_ref() } {
/// println!("The number is {}.", num);
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// # Multiple guards
///
/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
/// thread will actually be pinned only when the first guard is created and unpinned when the last
/// one is dropped:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = epoch::pin();
/// assert!(epoch::is_pinned());
/// drop(guard1);
/// assert!(epoch::is_pinned());
/// drop(guard2);
/// assert!(!epoch::is_pinned());
/// ```
///
/// [`pin`]: super::pin
pub struct Guard {
    /// The participant this guard pins, or null for the `unprotected()` guard.
    pub(crate) local: *const Local,
}
impl Guard {
/// Stores a function so that it can be executed at some point after all currently pinned
/// threads get unpinned.
///
/// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
/// becomes full, some functions are moved into the global cache. At the same time, some
/// functions from both local and global caches may get executed in order to incrementally
/// clean up the caches as they fill up.
///
/// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
/// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
/// never run, but the epoch-based garbage collection will make an effort to execute it
/// reasonably soon.
///
/// If this method is called from an [`unprotected`] guard, the function will simply be
/// executed immediately.
pub fn defer<F, R>(&self, f: F)
where
    F: FnOnce() -> R,
    F: Send + 'static,
{
    // The `F: Send + 'static` bounds make the unchecked variant safe to call:
    // the closure cannot borrow from this stack frame and may run on any
    // thread.
    unsafe {
        self.defer_unchecked(f);
    }
}
/// Stores a function so that it can be executed at some point after all currently pinned
/// threads get unpinned.
///
/// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
/// becomes full, some functions are moved into the global cache. At the same time, some
/// functions from both local and global caches may get executed in order to incrementally
/// clean up the caches as they fill up.
///
/// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
/// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
/// never run, but the epoch-based garbage collection will make an effort to execute it
/// reasonably soon.
///
/// If this method is called from an [`unprotected`] guard, the function will simply be
/// executed immediately.
///
/// # Safety
///
/// The given function must not hold reference onto the stack. It is highly recommended that
/// the passed function is **always** marked with `move` in order to prevent accidental
/// borrows.
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard = &epoch::pin();
/// let message = "Hello!";
/// unsafe {
/// // ALWAYS use `move` when sending a closure into `defer_unchecked`.
/// guard.defer_unchecked(move || {
/// println!("{}", message);
/// });
/// }
/// ```
///
/// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by
/// the closure must be `Send`.
///
/// We intentionally didn't require `F: Send`, because Rust's type systems usually cannot prove
/// `F: Send` for typical use cases. For example, consider the following code snippet, which
/// exemplifies the typical use case of deferring the deallocation of a shared reference:
///
/// ```ignore
/// let shared = Owned::new(7i32).into_shared(guard);
/// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`!
/// ```
///
/// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
/// because it's called only after the grace period and `shared` is no longer shared with other
/// threads. But we don't expect type systems to prove this.
///
/// # Examples
///
/// When a heap-allocated object in a data structure becomes unreachable, it has to be
/// deallocated. However, the current thread and other threads may be still holding references
/// on the stack to that same object. Therefore it cannot be deallocated before those references
/// get dropped. This method can defer deallocation until all those threads get unpinned and
/// consequently drop all their references on the stack.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new("foo");
///
/// // Now suppose that `a` is shared among multiple threads and concurrently
/// // accessed and modified...
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Steal the object currently stored in `a` and swap it with another one.
/// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
///
/// if !p.is_null() {
/// // The object `p` is pointing to is now unreachable.
/// // Defer its deallocation until all currently pinned threads get unpinned.
/// unsafe {
/// // ALWAYS use `move` when sending a closure into `defer_unchecked`.
/// guard.defer_unchecked(move || {
/// println!("{} is now being deallocated.", p.deref());
/// // Now we have unique access to the object pointed to by `p` and can turn it
/// // into an `Owned`. Dropping the `Owned` will deallocate the object.
/// drop(p.into_owned());
/// });
/// }
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub unsafe fn defer_unchecked<F, R>(&self, f: F)
where
    F: FnOnce() -> R,
{
    if let Some(local) = self.local.as_ref() {
        // A real guard: stash the closure (dropping its result whenever it
        // eventually runs) into the participant's local bag.
        local.defer(Deferred::new(move || drop(f())), self);
    } else {
        // An `unprotected()` guard has a null `local`: execute immediately.
        drop(f());
    }
}
/// Stores a destructor for an object so that it can be deallocated and dropped at some point
/// after all currently pinned threads get unpinned.
///
/// This method first stores the destructor into the thread-local (or handle-local) cache. If
/// this cache becomes full, some destructors are moved into the global cache. At the same
/// time, some destructors from both local and global caches may get executed in order to
/// incrementally clean up the caches as they fill up.
///
/// There is no guarantee when exactly the destructor will be executed. The only guarantee is
/// that it won't be executed until all currently pinned threads get unpinned. In theory, the
/// destructor might never run, but the epoch-based garbage collection will make an effort to
/// execute it reasonably soon.
///
/// If this method is called from an [`unprotected`] guard, the destructor will simply be
/// executed immediately.
///
/// # Safety
///
/// The object must not be reachable by other threads anymore, otherwise it might be still in
/// use when the destructor runs.
///
/// Apart from that, keep in mind that another thread may execute the destructor, so the object
/// must be sendable to other threads.
///
/// We intentionally didn't require `T: Send`, because Rust's type systems usually cannot prove
/// `T: Send` for typical use cases. For example, consider the following code snippet, which
/// exemplifies the typical use case of deferring the deallocation of a shared reference:
///
/// ```ignore
/// let shared = Owned::new(7i32).into_shared(guard);
/// guard.defer_destroy(shared); // `Shared` is not `Send`!
/// ```
///
/// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because
/// it's called only after the grace period and `shared` is no longer shared with other
/// threads. But we don't expect type systems to prove this.
///
/// # Examples
///
/// When a heap-allocated object in a data structure becomes unreachable, it has to be
/// deallocated. However, the current thread and other threads may be still holding references
/// on the stack to that same object. Therefore it cannot be deallocated before those references
/// get dropped. This method can defer deallocation until all those threads get unpinned and
/// consequently drop all their references on the stack.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new("foo");
///
/// // Now suppose that `a` is shared among multiple threads and concurrently
/// // accessed and modified...
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Steal the object currently stored in `a` and swap it with another one.
/// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
///
/// if !p.is_null() {
/// // The object `p` is pointing to is now unreachable.
/// // Defer its deallocation until all currently pinned threads get unpinned.
/// unsafe {
/// guard.defer_destroy(p);
/// }
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) {
    // Converting the shared pointer back into an `Owned` and letting it drop
    // is the destruction.
    self.defer_unchecked(move || ptr.into_owned());
}
/// Clears up the thread-local cache of deferred functions by executing them or moving into the
/// global cache.
///
/// Call this method after deferring execution of a function if you want to get it executed as
/// soon as possible. Flushing will make sure it is residing in the global cache, so that
/// any thread has a chance of taking the function and executing it.
///
/// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
///
/// # Examples
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard = &epoch::pin();
/// guard.defer(move || {
///     println!("This better be printed as soon as possible!");
/// });
/// guard.flush();
/// ```
pub fn flush(&self) {
    if let Some(local) = unsafe { self.local.as_ref() } {
        local.flush(self);
    }
}
/// Unpins and then immediately re-pins the thread.
///
/// This method is useful when you don't want to delay the advancement of the global epoch by
/// holding an old epoch. For safety, you should not maintain any guard-based reference across
/// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this
/// is the only active guard for the current thread.
///
/// If this method is called from an [`unprotected`] guard, then the call will be just no-op.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(777);
/// let mut guard = epoch::pin();
/// {
///     let p = a.load(SeqCst, &guard);
///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
/// }
/// guard.repin();
/// {
///     let p = a.load(SeqCst, &guard);
///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn repin(&mut self) {
    if let Some(local) = unsafe { self.local.as_ref() } {
        local.repin();
    }
}
/// Temporarily unpins the thread, executes the given function and then re-pins the thread.
///
/// This method is useful when you need to perform a long-running operation (e.g. sleeping)
/// and don't need to maintain any guard-based reference across the call (the latter is enforced
/// by `&mut self`). The thread will only be unpinned if this is the only active guard for the
/// current thread.
///
/// If this method is called from an [`unprotected`] guard, then the passed function is called
/// directly without unpinning the thread.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
/// use std::thread;
/// use std::time::Duration;
///
/// let a = Atomic::new(777);
/// let mut guard = epoch::pin();
/// {
/// let p = a.load(SeqCst, &guard);
/// assert_eq!(unsafe { p.as_ref() }, Some(&777));
/// }
/// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
/// {
/// let p = a.load(SeqCst, &guard);
/// assert_eq!(unsafe { p.as_ref() }, Some(&777));
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn repin_after<F, R>(&mut self, f: F) -> R
where
    F: FnOnce() -> R,
{
    // Ensure the Guard is re-pinned even if the function panics
    struct ScopeGuard(*const Local);
    impl Drop for ScopeGuard {
        fn drop(&mut self) {
            if let Some(local) = unsafe { self.0.as_ref() } {
                // Re-pin the participant. `mem::forget` discards the temporary
                // `Guard` so the fresh pin is not immediately undone; then
                // release the handle acquired below.
                mem::forget(local.pin());
                local.release_handle();
            }
        }
    }

    if let Some(local) = unsafe { self.local.as_ref() } {
        // We need to acquire a handle here to ensure the Local doesn't
        // disappear from under us.
        local.acquire_handle();
        local.unpin();
    }

    // Runs on both normal return and unwind, re-pinning in either case.
    let _guard = ScopeGuard(self.local);
    f()
}
/// Returns the `Collector` associated with this guard.
///
/// This method is useful when you need to ensure that all guards used with
/// a data structure come from the same collector.
///
/// If this method is called from an [`unprotected`] guard, then `None` is returned.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = epoch::pin();
/// assert!(guard1.collector() == guard2.collector());
/// ```
pub fn collector(&self) -> Option<&Collector> {
    // A null `local` (the `unprotected()` guard) has no associated collector.
    unsafe { self.local.as_ref().map(|local| local.collector()) }
}
}
impl Drop for Guard {
    #[inline]
    fn drop(&mut self) {
        // Unpin the participant; a no-op for the `unprotected()` guard, whose
        // `local` is null.
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.unpin();
        }
    }
}

impl fmt::Debug for Guard {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Guard { .. }")
    }
}
/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
///
/// This guard should be used in special occasions only. Note that it doesn't actually keep any
/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
///
/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
/// execute the function immediately.
///
/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
///
/// # Safety
///
/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
/// [`Atomic`] is not being concurrently modified by other threads.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::Relaxed;
///
/// let a = Atomic::new(7);
///
/// unsafe {
/// // Load `a` without pinning the current thread.
/// a.load(Relaxed, epoch::unprotected());
///
/// // It's possible to create more dummy guards.
/// let dummy = epoch::unprotected();
///
/// dummy.defer(move || {
/// println!("This gets executed immediately.");
/// });
///
/// // Dropping `dummy` doesn't affect the current thread - it's just a noop.
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// The most common use of this function is when constructing or destructing a data structure.
///
/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
///
/// If we were to actually pin the current thread during destruction, that would just unnecessarily
/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
/// is very helpful.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::mem::ManuallyDrop;
/// use std::sync::atomic::Ordering::Relaxed;
///
/// struct Stack<T> {
/// head: Atomic<Node<T>>,
/// }
///
/// struct Node<T> {
/// data: ManuallyDrop<T>,
/// next: Atomic<Node<T>>,
/// }
///
/// impl<T> Drop for Stack<T> {
/// fn drop(&mut self) {
/// unsafe {
/// // Unprotected load.
/// let mut node = self.head.load(Relaxed, epoch::unprotected());
///
/// while let Some(n) = node.as_ref() {
/// // Unprotected load.
/// let next = n.next.load(Relaxed, epoch::unprotected());
///
/// // Take ownership of the node, then drop its data and deallocate it.
/// let mut o = node.into_owned();
/// ManuallyDrop::drop(&mut o.data);
/// drop(o);
///
/// node = next;
/// }
/// }
/// }
/// }
/// ```
///
/// [`Atomic`]: super::Atomic
/// [`defer`]: Guard::defer
#[inline]
pub unsafe fn unprotected() -> &'static Guard {
    // An unprotected guard is just a `Guard` with its field `local` set to null.
    // We make a newtype over `Guard` because `Guard` isn't `Sync`, so can't be directly stored in
    // a `static`
    struct GuardWrapper(Guard);
    // SAFETY: the wrapped guard is never mutated, and its null `local` makes
    // `Drop`, `defer*`, `flush` and `repin*` all take their no-op paths, so
    // sharing one instance across threads is fine.
    unsafe impl Sync for GuardWrapper {}
    static UNPROTECTED: GuardWrapper = GuardWrapper(Guard {
        local: core::ptr::null(),
    });
    &UNPROTECTED.0
}

View file

@ -1,600 +0,0 @@
//! The global data and participant for garbage collection.
//!
//! # Registration
//!
//! In order to track all participants in one place, we need some form of participant
//! registration. When a participant is created, it is registered to a global lock-free
//! singly-linked list of registries; and when a participant is leaving, it is unregistered from the
//! list.
//!
//! # Pinning
//!
//! Every participant contains an integer that tells whether the participant is pinned and if so,
//! what was the global epoch at the time it was pinned. Participants also hold a pin counter that
//! aids in periodic global epoch advancement.
//!
//! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned.
//! Guards are necessary for performing atomic operations, and for freeing/dropping locations.
//!
//! # Thread-local bag
//!
//! Objects that get unlinked from concurrent data structures must be stashed away until the global
//! epoch sufficiently advances so that they become safe for destruction. Pointers to such objects
//! are pushed into a thread-local bag, and when it becomes full, the bag is marked with the current
//! global epoch and pushed into the global queue of bags. We store objects in thread-local storages
//! for amortizing the synchronization cost of pushing the garbages to a global queue.
//!
//! # Global queue
//!
//! Whenever a bag is pushed into a queue, the objects in some bags in the queue are collected and
//! destroyed along the way. This design reduces contention on data structures. The global queue
//! cannot be explicitly accessed: the only way to interact with it is by calling functions
//! `defer()` that adds an object to the thread-local bag, or `collect()` that manually triggers
//! garbage collection.
//!
//! Ideally each instance of concurrent data structure may have its own queue that gets fully
//! destroyed as soon as the data structure gets dropped.
use crate::primitive::cell::UnsafeCell;
use crate::primitive::sync::atomic::{self, Ordering};
use core::cell::Cell;
use core::mem::{self, ManuallyDrop};
use core::num::Wrapping;
use core::{fmt, ptr};
use crossbeam_utils::CachePadded;
use crate::atomic::{Owned, Shared};
use crate::collector::{Collector, LocalHandle};
use crate::deferred::Deferred;
use crate::epoch::{AtomicEpoch, Epoch};
use crate::guard::{unprotected, Guard};
use crate::sync::list::{Entry, IsElement, IterError, List};
use crate::sync::queue::Queue;
/// Maximum number of objects a bag can contain.
#[cfg(not(any(crossbeam_sanitize, miri)))]
const MAX_OBJECTS: usize = 64;
// A much smaller bag under sanitizers/Miri: bags fill and migrate to the
// global queue sooner, which makes it more likely to trigger any potential
// data races.
#[cfg(any(crossbeam_sanitize, miri))]
const MAX_OBJECTS: usize = 4;
/// A bag of deferred functions.
pub(crate) struct Bag {
    /// Stashed objects.
    deferreds: [Deferred; MAX_OBJECTS],
    /// Number of occupied slots at the front of `deferreds`; the remaining
    /// slots hold `Deferred::NO_OP` placeholders.
    len: usize,
}

/// `Bag::try_push()` requires that it is safe for another thread to execute the given functions.
unsafe impl Send for Bag {}
impl Bag {
    /// Returns a new, empty bag.
    pub(crate) fn new() -> Self {
        Self::default()
    }

    /// Returns `true` if the bag is empty.
    pub(crate) fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Attempts to insert a deferred function into the bag.
    ///
    /// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is
    /// full.
    ///
    /// # Safety
    ///
    /// It should be safe for another thread to execute the given function.
    pub(crate) unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> {
        if self.len < MAX_OBJECTS {
            self.deferreds[self.len] = deferred;
            self.len += 1;
            Ok(())
        } else {
            // Full: hand the deferred function back to the caller unchanged.
            Err(deferred)
        }
    }

    /// Seals the bag with the given epoch.
    fn seal(self, epoch: Epoch) -> SealedBag {
        SealedBag { epoch, _bag: self }
    }
}
impl Default for Bag {
    /// An empty bag: every slot holds a no-op and the occupied count is zero.
    fn default() -> Self {
        Self {
            deferreds: [Deferred::NO_OP; MAX_OBJECTS],
            len: 0,
        }
    }
}

impl Drop for Bag {
    fn drop(&mut self) {
        // Run every stashed deferred function, swapping a no-op into each slot
        // first so the array stays valid while the function executes.
        for slot in self.deferreds[..self.len].iter_mut() {
            mem::replace(slot, Deferred::NO_OP).call();
        }
    }
}
// can't #[derive(Debug)] because Debug is not implemented for arrays 64 items long
impl fmt::Debug for Bag {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the occupied prefix of the array is printed; `len` is implied
        // by the number of entries shown.
        f.debug_struct("Bag")
            .field("deferreds", &&self.deferreds[..self.len])
            .finish()
    }
}
/// A pair of an epoch and a bag.
#[derive(Default, Debug)]
struct SealedBag {
    /// The global epoch at the moment the bag was sealed.
    epoch: Epoch,
    /// The sealed bag; dropping it (once expired) runs its deferred functions.
    _bag: Bag,
}

/// It is safe to share `SealedBag` because `is_expired` only inspects the epoch.
unsafe impl Sync for SealedBag {}

impl SealedBag {
    /// Checks if it is safe to drop the bag w.r.t. the given global epoch.
    fn is_expired(&self, global_epoch: Epoch) -> bool {
        // A pinned participant can witness at most one epoch advancement. Therefore, any bag that
        // is within one epoch of the current one cannot be destroyed yet.
        global_epoch.wrapping_sub(self.epoch) >= 2
    }
}
/// The global data for a garbage collector.
pub(crate) struct Global {
    /// The intrusive linked list of `Local`s.
    locals: List<Local>,
    /// The global queue of bags of deferred functions.
    queue: Queue<SealedBag>,
    /// The global epoch.
    // Cache-padded so frequent epoch loads/stores do not false-share a cache
    // line with the list and queue.
    pub(crate) epoch: CachePadded<AtomicEpoch>,
}
impl Global {
    /// Number of bags to destroy per `collect()` call (outside sanitizer builds).
    const COLLECT_STEPS: usize = 8;

    /// Creates a new global data for garbage collection.
    #[inline]
    pub(crate) fn new() -> Self {
        Self {
            locals: List::new(),
            queue: Queue::new(),
            epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())),
        }
    }

    /// Pushes the bag into the global queue and replaces the bag with a new empty bag.
    pub(crate) fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
        let bag = mem::replace(bag, Bag::new());

        // Full fence before reading the epoch the bag is sealed with; pairs
        // with the SeqCst fences issued while pinning and in `try_advance`.
        atomic::fence(Ordering::SeqCst);

        let epoch = self.epoch.load(Ordering::Relaxed);
        self.queue.push(bag.seal(epoch), guard);
    }

    /// Collects several bags from the global queue and executes deferred functions in them.
    ///
    /// Note: This may itself produce garbage and in turn allocate new bags.
    ///
    /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold
    /// path. In other words, we want the compiler to optimize branching for the case when
    /// `collect()` is not called.
    #[cold]
    pub(crate) fn collect(&self, guard: &Guard) {
        let global_epoch = self.try_advance(guard);

        // Under sanitizers we drain as much as possible to surface bugs sooner;
        // otherwise the work per call is bounded so `collect()` stays cheap.
        let steps = if cfg!(crossbeam_sanitize) {
            usize::MAX
        } else {
            Self::COLLECT_STEPS
        };

        for _ in 0..steps {
            match self.queue.try_pop_if(
                &|sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch),
                guard,
            ) {
                None => break,
                Some(sealed_bag) => drop(sealed_bag),
            }
        }
    }

    /// Attempts to advance the global epoch.
    ///
    /// The global epoch can advance only if all currently pinned participants have been pinned in
    /// the current epoch.
    ///
    /// Returns the current global epoch.
    ///
    /// `try_advance()` is annotated `#[cold]` because it is rarely called.
    #[cold]
    pub(crate) fn try_advance(&self, guard: &Guard) -> Epoch {
        let global_epoch = self.epoch.load(Ordering::Relaxed);
        atomic::fence(Ordering::SeqCst);

        // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly
        // easy to implement in a lock-free manner. However, traversal can be slow due to cache
        // misses and data dependencies. We should experiment with other data structures as well.
        for local in self.locals.iter(guard) {
            match local {
                Err(IterError::Stalled) => {
                    // A concurrent thread stalled this iteration. That thread might also try to
                    // advance the epoch, in which case we leave the job to it. Otherwise, the
                    // epoch will not be advanced.
                    return global_epoch;
                }
                Ok(local) => {
                    let local_epoch = local.epoch.load(Ordering::Relaxed);

                    // If the participant was pinned in a different epoch, we cannot advance the
                    // global epoch just yet.
                    if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch {
                        return global_epoch;
                    }
                }
            }
        }
        atomic::fence(Ordering::Acquire);

        // All pinned participants were pinned in the current global epoch.
        // Now let's advance the global epoch...
        //
        // Note that if another thread already advanced it before us, this store will simply
        // overwrite the global epoch with the same value. This is true because `try_advance` was
        // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be
        // advanced two steps ahead of it.
        let new_epoch = global_epoch.successor();
        self.epoch.store(new_epoch, Ordering::Release);
        new_epoch
    }
}
/// Participant for garbage collection.
#[repr(C)] // Note: `entry` must be the first field
pub(crate) struct Local {
    /// A node in the intrusive linked list of `Local`s.
    // `IsElement for Local` casts `&Local` directly to `&Entry`; that is only
    // sound while `entry` remains the first field of this `repr(C)` struct.
    entry: Entry,
    /// A reference to the global data.
    ///
    /// When all guards and handles get dropped, this reference is destroyed.
    collector: UnsafeCell<ManuallyDrop<Collector>>,
    /// The local bag of deferred functions.
    pub(crate) bag: UnsafeCell<Bag>,
    /// The number of guards keeping this participant pinned.
    guard_count: Cell<usize>,
    /// The number of active handles.
    handle_count: Cell<usize>,
    /// Total number of pinnings performed.
    ///
    /// This is just an auxiliary counter that sometimes kicks off collection.
    pin_count: Cell<Wrapping<usize>>,
    /// The local epoch.
    epoch: CachePadded<AtomicEpoch>,
}
// Make sure `Local` is less than or equal to 2048 bytes.
// https://github.com/crossbeam-rs/crossbeam/issues/551
#[cfg(not(any(crossbeam_sanitize, miri)))] // `crossbeam_sanitize` and `miri` reduce the size of `Local`
#[test]
fn local_size() {
    // The size assertion is temporarily disabled; see the issue below.
    // TODO: https://github.com/crossbeam-rs/crossbeam/issues/869
    // assert!(
    //     core::mem::size_of::<Local>() <= 2048,
    //     "An allocation of `Local` should be <= 2048 bytes."
    // );
}
impl Local {
    /// Number of pinnings after which a participant will execute some deferred functions from the
    /// global queue.
    const PINNINGS_BETWEEN_COLLECT: usize = 128;

    /// Registers a new `Local` in the provided `Global`.
    pub(crate) fn register(collector: &Collector) -> LocalHandle {
        unsafe {
            // Since we dereference no pointers in this block, it is safe to use `unprotected`.
            let local = Owned::new(Local {
                entry: Entry::default(),
                collector: UnsafeCell::new(ManuallyDrop::new(collector.clone())),
                bag: UnsafeCell::new(Bag::new()),
                guard_count: Cell::new(0),
                // Starts at 1: the returned `LocalHandle` is the first handle.
                handle_count: Cell::new(1),
                pin_count: Cell::new(Wrapping(0)),
                epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())),
            })
            .into_shared(unprotected());
            collector.global.locals.insert(local, unprotected());
            LocalHandle {
                local: local.as_raw(),
            }
        }
    }

    /// Returns a reference to the `Global` in which this `Local` resides.
    #[inline]
    pub(crate) fn global(&self) -> &Global {
        &self.collector().global
    }

    /// Returns a reference to the `Collector` in which this `Local` resides.
    #[inline]
    pub(crate) fn collector(&self) -> &Collector {
        self.collector.with(|c| unsafe { &**c })
    }

    /// Returns `true` if the current participant is pinned.
    #[inline]
    pub(crate) fn is_pinned(&self) -> bool {
        self.guard_count.get() > 0
    }

    /// Adds `deferred` to the thread-local bag.
    ///
    /// # Safety
    ///
    /// It should be safe for another thread to execute the given function.
    pub(crate) unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) {
        let bag = self.bag.with_mut(|b| &mut *b);

        // If the bag is full, flush it to the global queue and retry the push.
        while let Err(d) = bag.try_push(deferred) {
            self.global().push_bag(bag, guard);
            deferred = d;
        }
    }

    /// Moves the local bag into the global queue and triggers a collection pass.
    pub(crate) fn flush(&self, guard: &Guard) {
        let bag = self.bag.with_mut(|b| unsafe { &mut *b });

        if !bag.is_empty() {
            self.global().push_bag(bag, guard);
        }

        self.global().collect(guard);
    }

    /// Pins the `Local`.
    #[inline]
    pub(crate) fn pin(&self) -> Guard {
        let guard = Guard { local: self };

        let guard_count = self.guard_count.get();
        self.guard_count.set(guard_count.checked_add(1).unwrap());

        // Only the first (outermost) guard publishes the pinned epoch.
        if guard_count == 0 {
            let global_epoch = self.global().epoch.load(Ordering::Relaxed);
            let new_epoch = global_epoch.pinned();

            // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence.
            // The fence makes sure that any future loads from `Atomic`s will not happen before
            // this store.
            if cfg!(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(miri)
            )) {
                // HACK(stjepang): On x86 architectures there are two different ways of executing
                // a `SeqCst` fence.
                //
                // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
                // 2. `_.compare_exchange(_, _, SeqCst, SeqCst)`, which compiles into a `lock cmpxchg`
                //    instruction.
                //
                // Both instructions have the effect of a full barrier, but benchmarks have shown
                // that the second one makes pinning faster in this particular case. It is not
                // clear that this is permitted by the C++ memory model (SC fences work very
                // differently from SC accesses), but experimental evidence suggests that this
                // works fine. Using inline assembly would be a viable (and correct) alternative,
                // but alas, that is not possible on stable Rust.
                let current = Epoch::starting();
                let res = self.epoch.compare_exchange(
                    current,
                    new_epoch,
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                );
                debug_assert!(res.is_ok(), "participant was expected to be unpinned");
                // We add a compiler fence to make it less likely for LLVM to do something wrong
                // here.  Formally, this is not enough to get rid of data races; practically,
                // it should go a long way.
                atomic::compiler_fence(Ordering::SeqCst);
            } else {
                self.epoch.store(new_epoch, Ordering::Relaxed);
                atomic::fence(Ordering::SeqCst);
            }

            // Increment the pin counter.
            let count = self.pin_count.get();
            self.pin_count.set(count + Wrapping(1));

            // After every `PINNINGS_BETWEEN_COLLECT` try advancing the epoch and collecting
            // some garbage.
            if count.0 % Self::PINNINGS_BETWEEN_COLLECT == 0 {
                self.global().collect(&guard);
            }
        }

        guard
    }

    /// Unpins the `Local`.
    #[inline]
    pub(crate) fn unpin(&self) {
        let guard_count = self.guard_count.get();
        self.guard_count.set(guard_count - 1);

        // Only the last (outermost) guard actually unpins the participant.
        if guard_count == 1 {
            self.epoch.store(Epoch::starting(), Ordering::Release);

            if self.handle_count.get() == 0 {
                self.finalize();
            }
        }
    }

    /// Unpins and then pins the `Local`.
    #[inline]
    pub(crate) fn repin(&self) {
        let guard_count = self.guard_count.get();

        // Update the local epoch only if there's only one guard.
        if guard_count == 1 {
            let epoch = self.epoch.load(Ordering::Relaxed);
            let global_epoch = self.global().epoch.load(Ordering::Relaxed).pinned();

            // Update the local epoch only if the global epoch is greater than the local epoch.
            if epoch != global_epoch {
                // We store the new epoch with `Release` because we need to ensure any memory
                // accesses from the previous epoch do not leak into the new one.
                self.epoch.store(global_epoch, Ordering::Release);

                // However, we don't need a following `SeqCst` fence, because it is safe for memory
                // accesses from the new epoch to be executed before updating the local epoch. At
                // worst, other threads will see the new epoch late and delay GC slightly.
            }
        }
    }

    /// Increments the handle count.
    #[inline]
    pub(crate) fn acquire_handle(&self) {
        let handle_count = self.handle_count.get();
        debug_assert!(handle_count >= 1);
        self.handle_count.set(handle_count + 1);
    }

    /// Decrements the handle count.
    #[inline]
    pub(crate) fn release_handle(&self) {
        let guard_count = self.guard_count.get();
        let handle_count = self.handle_count.get();
        debug_assert!(handle_count >= 1);
        self.handle_count.set(handle_count - 1);

        // The last handle is gone and no guard is pinning us: tear down now.
        if guard_count == 0 && handle_count == 1 {
            self.finalize();
        }
    }

    /// Removes the `Local` from the global linked list.
    #[cold]
    fn finalize(&self) {
        debug_assert_eq!(self.guard_count.get(), 0);
        debug_assert_eq!(self.handle_count.get(), 0);

        // Temporarily increment handle count. This is required so that the following call to `pin`
        // doesn't call `finalize` again.
        self.handle_count.set(1);
        unsafe {
            // Pin and move the local bag into the global queue. It's important that `push_bag`
            // doesn't defer destruction on any new garbage.
            let guard = &self.pin();
            self.global()
                .push_bag(self.bag.with_mut(|b| &mut *b), guard);
        }
        // Revert the handle count back to zero.
        self.handle_count.set(0);

        unsafe {
            // Take the reference to the `Global` out of this `Local`. Since we're not protected
            // by a guard at this time, it's crucial that the reference is read before marking the
            // `Local` as deleted.
            let collector: Collector = ptr::read(self.collector.with(|c| &*(*c)));

            // Mark this node in the linked list as deleted.
            self.entry.delete(unprotected());

            // Finally, drop the reference to the global. Note that this might be the last reference
            // to the `Global`. If so, the global data will be destroyed and all deferred functions
            // in its queue will be executed.
            drop(collector);
        }
    }
}
impl IsElement<Self> for Local {
    fn entry_of(local: &Self) -> &Entry {
        // SAFETY: `Local` is `repr(C)` and `entry` is the first field of it.
        unsafe {
            let entry_ptr = (local as *const Self).cast::<Entry>();
            &*entry_ptr
        }
    }

    unsafe fn element_of(entry: &Entry) -> &Self {
        // SAFETY: `Local` is `repr(C)` and `entry` is the first field of it.
        let local_ptr = (entry as *const Entry).cast::<Self>();
        &*local_ptr
    }

    unsafe fn finalize(entry: &Entry, guard: &Guard) {
        // Defer destruction of the `Local` until no thread can be referencing it.
        guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _));
    }
}
#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicUsize, Ordering};

    /// A deferred function must run only when explicitly invoked.
    #[test]
    fn check_defer() {
        static FLAG: AtomicUsize = AtomicUsize::new(0);
        fn set() {
            FLAG.store(42, Ordering::Relaxed);
        }

        let deferred = Deferred::new(set);
        assert_eq!(FLAG.load(Ordering::Relaxed), 0);
        deferred.call();
        assert_eq!(FLAG.load(Ordering::Relaxed), 42);
    }

    /// A bag accepts exactly `MAX_OBJECTS` functions and runs them all on drop.
    #[test]
    fn check_bag() {
        static FLAG: AtomicUsize = AtomicUsize::new(0);
        fn incr() {
            FLAG.fetch_add(1, Ordering::Relaxed);
        }

        let mut bag = Bag::new();
        assert!(bag.is_empty());

        // Fill the bag to capacity; nothing must execute while stored.
        for _ in 0..MAX_OBJECTS {
            assert!(unsafe { bag.try_push(Deferred::new(incr)).is_ok() });
            assert!(!bag.is_empty());
            assert_eq!(FLAG.load(Ordering::Relaxed), 0);
        }

        // One extra push must be rejected since the bag is full.
        let overflow = unsafe { bag.try_push(Deferred::new(incr)) };
        assert!(overflow.is_err());
        assert!(!bag.is_empty());
        assert_eq!(FLAG.load(Ordering::Relaxed), 0);

        // Dropping the bag executes every stored function exactly once.
        drop(bag);
        assert_eq!(FLAG.load(Ordering::Relaxed), MAX_OBJECTS);
    }
}

View file

@ -1,166 +0,0 @@
//! Epoch-based memory reclamation.
//!
//! An interesting problem concurrent collections deal with comes from the remove operation.
//! Suppose that a thread removes an element from a lock-free map, while another thread is reading
//! that same element at the same time. The first thread must wait until the second thread stops
//! reading the element. Only then it is safe to destruct it.
//!
//! Programming languages that come with garbage collectors solve this problem trivially. The
//! garbage collector will destruct the removed element when no thread can hold a reference to it
//! anymore.
//!
//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an
//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and
//! marked with the current epoch. Every time a thread accesses a collection, it checks the current
//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread
//! can be referencing it anymore.
//!
//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit
//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something
//! users of concurrent collections don't have to worry much about.
//!
//! # Pointers
//!
//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which
//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a
//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely
//! read.
//!
//! # Pinning
//!
//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant
//! we declare that any object that gets removed from now on must not be destructed just
//! yet. Garbage collection of newly removed objects is suspended until the participant gets
//! unpinned.
//!
//! # Garbage
//!
//! Objects that get removed from concurrent collections must be stashed away until all currently
//! pinned participants get unpinned. Such objects can be stored into a thread-local or global
//! storage, where they are kept until the right time for their destruction comes.
//!
//! There is a global shared instance of garbage queue. You can [`defer`](Guard::defer) the execution of an
//! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data
//! structures may defer the deallocation of an object.
//!
//! # APIs
//!
//! For the majority of use cases, just use the default garbage collector by invoking [`pin`]. If
//! you want to create your own garbage collector, use the [`Collector`] API.
#![doc(test(
no_crate_inject,
attr(
deny(warnings, rust_2018_idioms),
allow(dead_code, unused_assignments, unused_variables)
)
))]
#![warn(
missing_docs,
missing_debug_implementations,
rust_2018_idioms,
unreachable_pub
)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(crossbeam_loom)]
extern crate loom_crate as loom;

// Shim module re-exporting loom's concurrency primitives so the rest of the
// crate can be model-checked under loom without source changes.
#[cfg(crossbeam_loom)]
#[allow(unused_imports, dead_code)]
mod primitive {
    pub(crate) mod cell {
        pub(crate) use loom::cell::UnsafeCell;
    }
    pub(crate) mod sync {
        pub(crate) mod atomic {
            pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};

            // FIXME: loom does not support compiler_fence at the moment.
            // https://github.com/tokio-rs/loom/issues/117
            // we use fence as a stand-in for compiler_fence for the time being.
            // this may miss some races since fence is stronger than compiler_fence,
            // but it's the best we can do for the time being.
            pub(crate) use self::fence as compiler_fence;
        }
        pub(crate) use loom::sync::Arc;
    }
    pub(crate) use loom::thread_local;
}
// Non-loom build: thin wrappers around core/std primitives exposing the same
// API shape as the loom shim above.
#[cfg(target_has_atomic = "ptr")]
#[cfg(not(crossbeam_loom))]
#[allow(unused_imports, dead_code)]
mod primitive {
    pub(crate) mod cell {
        #[derive(Debug)]
        #[repr(transparent)]
        pub(crate) struct UnsafeCell<T>(::core::cell::UnsafeCell<T>);

        // loom's UnsafeCell has a slightly different API than the standard library UnsafeCell.
        // Since we want the rest of the code to be agnostic to whether it's running under loom or
        // not, we write this small wrapper that provides the loom-supported API for the standard
        // library UnsafeCell. This is also what the loom documentation recommends:
        // https://github.com/tokio-rs/loom#handling-loom-api-differences
        impl<T> UnsafeCell<T> {
            #[inline]
            pub(crate) const fn new(data: T) -> UnsafeCell<T> {
                UnsafeCell(::core::cell::UnsafeCell::new(data))
            }

            /// Calls `f` with a shared raw pointer to the contents.
            #[inline]
            pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
                f(self.0.get())
            }

            /// Calls `f` with a mutable raw pointer to the contents.
            #[inline]
            pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
                f(self.0.get())
            }
        }
    }
    pub(crate) mod sync {
        pub(crate) mod atomic {
            pub(crate) use core::sync::atomic::{
                compiler_fence, fence, AtomicPtr, AtomicUsize, Ordering,
            };
        }
        #[cfg(feature = "alloc")]
        pub(crate) use alloc::sync::Arc;
    }
    #[cfg(feature = "std")]
    pub(crate) use std::thread_local;
}
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
extern crate alloc;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod atomic;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod collector;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod deferred;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod epoch;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod guard;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod internal;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod sync;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
#[allow(deprecated)]
pub use crate::atomic::{CompareAndSetError, CompareAndSetOrdering};
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
pub use crate::{
atomic::{Atomic, CompareExchangeError, Owned, Pointable, Pointer, Shared},
collector::{Collector, LocalHandle},
guard::{unprotected, Guard},
};
#[cfg(feature = "std")]
mod default;
#[cfg(feature = "std")]
pub use crate::default::{default_collector, is_pinned, pin};

View file

@ -1,487 +0,0 @@
//! Lock-free intrusive linked list.
//!
//! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA
//! 2002. <http://dl.acm.org/citation.cfm?id=564870.564881>
use core::marker::PhantomData;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crate::{unprotected, Atomic, Guard, Shared};
/// An entry in a linked list.
///
/// An Entry is accessed from multiple threads, so it would be beneficial to put it in a different
/// cache-line than thread-local data in terms of performance.
#[derive(Debug)]
pub(crate) struct Entry {
    /// The next entry in the linked list.
    /// If the tag is 1, this entry is marked as deleted.
    next: Atomic<Entry>,
}
/// Implementing this trait asserts that the type `T` can be used as an element in the intrusive
/// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance
/// of `Entry`.
///
/// # Example
///
/// ```ignore
/// struct A {
///     entry: Entry,
///     data: usize,
/// }
///
/// impl IsElement<A> for A {
///     fn entry_of(a: &A) -> &Entry {
///         let entry_ptr = ((a as usize) + offset_of!(A, entry)) as *const Entry;
///         unsafe { &*entry_ptr }
///     }
///
///     unsafe fn element_of(entry: &Entry) -> &T {
///         let elem_ptr = ((entry as usize) - offset_of!(A, entry)) as *const T;
///         &*elem_ptr
///     }
///
///     unsafe fn finalize(entry: &Entry, guard: &Guard) {
///         guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _));
///     }
/// }
/// ```
///
/// This trait is implemented on a type separate from `T` (although it can be just `T`), because
/// one type might be placeable into multiple lists, in which case it would require multiple
/// implementations of `IsElement`. In such cases, each struct implementing `IsElement<T>`
/// represents a distinct `Entry` in `T`.
///
/// For example, we can insert the following struct into two lists using `entry1` for one
/// and `entry2` for the other:
///
/// ```ignore
/// struct B {
///     entry1: Entry,
///     entry2: Entry,
///     data: usize,
/// }
/// ```
///
pub(crate) trait IsElement<T> {
    /// Returns a reference to this element's `Entry`.
    fn entry_of(_: &T) -> &Entry;

    /// Given a reference to an element's entry, returns that element.
    ///
    /// ```ignore
    /// let elem = ListElement::new();
    /// assert_eq!(elem.entry_of(),
    ///            unsafe { ListElement::element_of(elem.entry_of()) } );
    /// ```
    ///
    /// # Safety
    ///
    /// The caller has to guarantee that the `Entry` it is called with was retrieved from an
    /// instance of the element type (`T`).
    unsafe fn element_of(_: &Entry) -> &T;

    /// The function that is called when an entry is unlinked from list.
    ///
    /// # Safety
    ///
    /// The caller has to guarantee that the `Entry` it is called with was retrieved from an
    /// instance of the element type (`T`).
    unsafe fn finalize(_: &Entry, _: &Guard);
}
/// A lock-free, intrusive linked list of type `T`.
#[derive(Debug)]
pub(crate) struct List<T, C: IsElement<T> = T> {
    /// The head of the linked list.
    head: Atomic<Entry>,
    /// The phantom data for using `T` and `C`.
    _marker: PhantomData<(T, C)>,
}

/// An iterator used for retrieving values from the list.
pub(crate) struct Iter<'g, T, C: IsElement<T>> {
    /// The guard that protects the iteration.
    guard: &'g Guard,
    /// Pointer from the predecessor to the current entry.
    pred: &'g Atomic<Entry>,
    /// The current entry.
    curr: Shared<'g, Entry>,
    /// The list head, needed for restarting iteration.
    head: &'g Atomic<Entry>,
    /// Logically, we store a borrow of an instance of `T` and
    /// use the type information from `C`.
    _marker: PhantomData<(&'g T, C)>,
}

/// An error that occurs during iteration over the list.
#[derive(PartialEq, Debug)]
pub(crate) enum IterError {
    /// A concurrent thread modified the state of the list at the same place that this iterator
    /// was inspecting. Subsequent iteration will restart from the beginning of the list.
    Stalled,
}
impl Default for Entry {
    /// Returns the empty entry.
    fn default() -> Self {
        Self {
            next: Atomic::null(),
        }
    }
}

impl Entry {
    /// Marks this entry as deleted, deferring the actual deallocation to a later iteration.
    ///
    /// # Safety
    ///
    /// The entry should be a member of a linked list, and it should not have been deleted.
    /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C`
    /// is the associated helper for the linked list.
    pub(crate) unsafe fn delete(&self, guard: &Guard) {
        // Setting tag bit 1 on `next` logically deletes the entry; physical
        // unlinking happens later during iteration.
        self.next.fetch_or(1, Release, guard);
    }
}
impl<T, C: IsElement<T>> List<T, C> {
    /// Returns a new, empty linked list.
    pub(crate) fn new() -> Self {
        Self {
            head: Atomic::null(),
            _marker: PhantomData,
        }
    }

    /// Inserts `entry` into the head of the list.
    ///
    /// # Safety
    ///
    /// You should guarantee that:
    ///
    /// - `container` is not null
    /// - `container` is immovable, e.g. inside an `Owned`
    /// - the same `Entry` is not inserted more than once
    /// - the inserted object will be removed before the list is dropped
    pub(crate) unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) {
        // Insert right after head, i.e. at the beginning of the list.
        let to = &self.head;
        // Get the intrusively stored Entry of the new element to insert.
        let entry: &Entry = C::entry_of(container.deref());
        // Make a Shared ptr to that Entry.
        let entry_ptr = Shared::from(entry as *const _);
        // Read the current successor of where we want to insert.
        let mut next = to.load(Relaxed, guard);

        loop {
            // Set the Entry of the to-be-inserted element to point to the previous successor of
            // `to`.
            entry.next.store(next, Relaxed);
            match to.compare_exchange_weak(next, entry_ptr, Release, Relaxed, guard) {
                Ok(_) => break,
                // We lost the race or weak CAS failed spuriously. Update the successor and try
                // again.
                Err(err) => next = err.current,
            }
        }
    }

    /// Returns an iterator over all objects.
    ///
    /// # Caveat
    ///
    /// Every object that is inserted at the moment this function is called and persists at least
    /// until the end of iteration will be returned. Since this iterator traverses a lock-free
    /// linked list that may be concurrently modified, some additional caveats apply:
    ///
    /// 1. If a new object is inserted during iteration, it may or may not be returned.
    /// 2. If an object is deleted during iteration, it may or may not be returned.
    /// 3. The iteration may be aborted when it loses a race with a concurrent thread. In this
    ///    case, the winning thread will continue to iterate over the same list.
    pub(crate) fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> {
        Iter {
            guard,
            pred: &self.head,
            curr: self.head.load(Acquire, guard),
            head: &self.head,
            _marker: PhantomData,
        }
    }
}
impl<T, C: IsElement<T>> Drop for List<T, C> {
    fn drop(&mut self) {
        unsafe {
            // We have exclusive access (`&mut self`), so an unprotected guard is fine here.
            let guard = unprotected();
            let mut curr = self.head.load(Relaxed, guard);
            while let Some(c) = curr.as_ref() {
                let succ = c.next.load(Relaxed, guard);
                // Verify that all elements have been removed from the list.
                assert_eq!(succ.tag(), 1);

                C::finalize(curr.deref(), guard);
                curr = succ;
            }
        }
    }
}
impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
    type Item = Result<&'g T, IterError>;

    fn next(&mut self) -> Option<Self::Item> {
        while let Some(c) = unsafe { self.curr.as_ref() } {
            let succ = c.next.load(Acquire, self.guard);

            if succ.tag() == 1 {
                // This entry was removed. Try unlinking it from the list.
                let succ = succ.with_tag(0);

                // The tag should always be zero, because removing a node after a logically deleted
                // node leaves the list in an invalid state.
                debug_assert!(self.curr.tag() == 0);

                // Try to unlink `curr` from the list, and get the new value of `self.pred`.
                let succ = match self
                    .pred
                    .compare_exchange(self.curr, succ, Acquire, Acquire, self.guard)
                {
                    Ok(_) => {
                        // We succeeded in unlinking `curr`, so we have to schedule
                        // deallocation. Deferred drop is okay, because `list.delete()` can only be
                        // called if `T: 'static`.
                        unsafe {
                            C::finalize(self.curr.deref(), self.guard);
                        }

                        // `succ` is the new value of `self.pred`.
                        succ
                    }
                    Err(e) => {
                        // `e.current` is the current value of `self.pred`.
                        e.current
                    }
                };

                // If the predecessor node is already marked as deleted, we need to restart from
                // `head`.
                if succ.tag() != 0 {
                    self.pred = self.head;
                    self.curr = self.head.load(Acquire, self.guard);

                    return Some(Err(IterError::Stalled));
                }

                // Move over the removed entry by only advancing `curr`, not `pred`.
                self.curr = succ;
                continue;
            }

            // Move one step forward.
            self.pred = &c.next;
            self.curr = succ;

            return Some(Ok(unsafe { C::element_of(c) }));
        }

        // We reached the end of the list.
        None
    }
}
#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
use super::*;
use crate::{Collector, Owned};
use crossbeam_utils::thread;
use std::sync::Barrier;
impl IsElement<Entry> for Entry {
fn entry_of(entry: &Entry) -> &Entry {
entry
}
unsafe fn element_of(entry: &Entry) -> &Entry {
entry
}
unsafe fn finalize(entry: &Entry, guard: &Guard) {
guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _));
}
}
/// Checks whether the list retains inserted elements
/// and returns them in the correct order.
#[test]
fn insert() {
let collector = Collector::new();
let handle = collector.register();
let guard = handle.pin();
let l: List<Entry> = List::new();
let e1 = Owned::new(Entry::default()).into_shared(&guard);
let e2 = Owned::new(Entry::default()).into_shared(&guard);
let e3 = Owned::new(Entry::default()).into_shared(&guard);
unsafe {
l.insert(e1, &guard);
l.insert(e2, &guard);
l.insert(e3, &guard);
}
let mut iter = l.iter(&guard);
let maybe_e3 = iter.next();
assert!(maybe_e3.is_some());
assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
let maybe_e2 = iter.next();
assert!(maybe_e2.is_some());
assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw());
let maybe_e1 = iter.next();
assert!(maybe_e1.is_some());
assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
assert!(iter.next().is_none());
unsafe {
e1.as_ref().unwrap().delete(&guard);
e2.as_ref().unwrap().delete(&guard);
e3.as_ref().unwrap().delete(&guard);
}
}
/// Checks whether elements can be removed from the list and whether
/// the correct elements are removed.
#[test]
fn delete() {
let collector = Collector::new();
let handle = collector.register();
let guard = handle.pin();
let l: List<Entry> = List::new();
let e1 = Owned::new(Entry::default()).into_shared(&guard);
let e2 = Owned::new(Entry::default()).into_shared(&guard);
let e3 = Owned::new(Entry::default()).into_shared(&guard);
unsafe {
l.insert(e1, &guard);
l.insert(e2, &guard);
l.insert(e3, &guard);
e2.as_ref().unwrap().delete(&guard);
}
let mut iter = l.iter(&guard);
let maybe_e3 = iter.next();
assert!(maybe_e3.is_some());
assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
let maybe_e1 = iter.next();
assert!(maybe_e1.is_some());
assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
assert!(iter.next().is_none());
unsafe {
e1.as_ref().unwrap().delete(&guard);
e3.as_ref().unwrap().delete(&guard);
}
let mut iter = l.iter(&guard);
assert!(iter.next().is_none());
}
const THREADS: usize = 8;
const ITERS: usize = 512;
/// Contends the list on insert and delete operations to make sure they can run concurrently.
#[test]
fn insert_delete_multi() {
    let collector = Collector::new();
    let l: List<Entry> = List::new();
    let b = Barrier::new(THREADS);
    thread::scope(|s| {
        for _ in 0..THREADS {
            s.spawn(|_| {
                // Line all threads up so inserts and deletes actually overlap.
                b.wait();
                let handle = collector.register();
                let guard: Guard = handle.pin();
                let mut v = Vec::with_capacity(ITERS);
                for _ in 0..ITERS {
                    let e = Owned::new(Entry::default()).into_shared(&guard);
                    v.push(e);
                    unsafe {
                        l.insert(e, &guard);
                    }
                }
                // Delete every entry this thread inserted.
                for e in v {
                    unsafe {
                        e.as_ref().unwrap().delete(&guard);
                    }
                }
            });
        }
    })
    .unwrap();
    // Every inserted entry was also deleted, so the list must end up empty.
    let handle = collector.register();
    let guard = handle.pin();
    let mut iter = l.iter(&guard);
    assert!(iter.next().is_none());
}
/// Contends the list on iteration to make sure that it can be iterated over concurrently.
#[test]
fn iter_multi() {
    let collector = Collector::new();
    let l: List<Entry> = List::new();
    let b = Barrier::new(THREADS);
    thread::scope(|s| {
        for _ in 0..THREADS {
            s.spawn(|_| {
                b.wait();
                let handle = collector.register();
                let guard: Guard = handle.pin();
                let mut v = Vec::with_capacity(ITERS);
                for _ in 0..ITERS {
                    let e = Owned::new(Entry::default()).into_shared(&guard);
                    v.push(e);
                    unsafe {
                        l.insert(e, &guard);
                    }
                }
                // This thread inserted ITERS entries and has not deleted any
                // of them yet, so the iterator must yield at least ITERS items
                // (other threads' entries may or may not be visible).
                let mut iter = l.iter(&guard);
                for _ in 0..ITERS {
                    assert!(iter.next().is_some());
                }
                for e in v {
                    unsafe {
                        e.as_ref().unwrap().delete(&guard);
                    }
                }
            });
        }
    })
    .unwrap();
    // All threads deleted everything they inserted: the list must be empty.
    let handle = collector.register();
    let guard = handle.pin();
    let mut iter = l.iter(&guard);
    assert!(iter.next().is_none());
}
}

View file

@ -1,7 +0,0 @@
//! Synchronization primitives.
pub(crate) mod list;
// `once_lock` is built on `std::sync::Once`, so it is only compiled with the
// `std` feature and is excluded from loom model-checking builds.
#[cfg(feature = "std")]
#[cfg(not(crossbeam_loom))]
pub(crate) mod once_lock;
pub(crate) mod queue;

View file

@ -1,88 +0,0 @@
// Based on unstable std::sync::OnceLock.
//
// Source: https://github.com/rust-lang/rust/blob/8e9c93df464b7ada3fc7a1c8ccddd9dcb24ee0a0/library/std/src/sync/once_lock.rs
use core::cell::UnsafeCell;
use core::mem::MaybeUninit;
use std::sync::Once;
/// A cell that can be written to only once, usable from multiple threads.
/// Port of the (then-unstable) `std::sync::OnceLock`.
pub(crate) struct OnceLock<T> {
    // Tracks whether `value` has been initialized; all access to `value`
    // is gated on `once.is_completed()`.
    once: Once,
    // Storage for the value; uninitialized until `once` completes.
    value: UnsafeCell<MaybeUninit<T>>,
    // Unlike std::sync::OnceLock, we don't need PhantomData here because
    // we don't use #[may_dangle].
}
// SAFETY: the `UnsafeCell` is only read after `once` has completed and only
// written inside `Once::call_once`, mirroring std's `OnceLock` bounds.
unsafe impl<T: Sync + Send> Sync for OnceLock<T> {}
unsafe impl<T: Send> Send for OnceLock<T> {}
impl<T> OnceLock<T> {
    /// Creates a new empty cell.
    #[must_use]
    pub(crate) const fn new() -> Self {
        Self {
            once: Once::new(),
            value: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }
    /// Gets the contents of the cell, initializing it with `f` if the cell
    /// was empty.
    ///
    /// Many threads may call `get_or_init` concurrently with different
    /// initializing functions, but it is guaranteed that only one function
    /// will be executed.
    ///
    /// # Panics
    ///
    /// If `f` panics, the panic is propagated to the caller, and the cell
    /// remains uninitialized.
    ///
    /// It is an error to reentrantly initialize the cell from `f`. The
    /// exact outcome is unspecified. Current implementation deadlocks, but
    /// this may be changed to a panic in the future.
    pub(crate) fn get_or_init<F>(&self, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        // Fast path check
        if self.once.is_completed() {
            // SAFETY: The inner value has been initialized
            return unsafe { self.get_unchecked() };
        }
        self.initialize(f);
        // SAFETY: The inner value has been initialized
        unsafe { self.get_unchecked() }
    }
    /// Slow path: run `f` under `Once::call_once` so that exactly one caller
    /// writes the value; concurrent callers block until it completes.
    #[cold]
    fn initialize<F>(&self, f: F)
    where
        F: FnOnce() -> T,
    {
        let slot = self.value.get();
        self.once.call_once(|| {
            let value = f();
            unsafe { slot.write(MaybeUninit::new(value)) }
        });
    }
    /// # Safety
    ///
    /// The value must be initialized
    unsafe fn get_unchecked(&self) -> &T {
        debug_assert!(self.once.is_completed());
        &*self.value.get().cast::<T>()
    }
}
impl<T> Drop for OnceLock<T> {
    fn drop(&mut self) {
        // Only drop the inner value if initialization actually completed;
        // otherwise the `MaybeUninit` slot is uninitialized and must not be read.
        if self.once.is_completed() {
            // SAFETY: The inner value has been initialized
            unsafe { (*self.value.get()).assume_init_drop() };
        }
    }
}

View file

@ -1,468 +0,0 @@
//! Michael-Scott lock-free queue.
//!
//! Usable with any number of producers and consumers.
//!
//! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue
//! Algorithms. PODC 1996. <http://dl.acm.org/citation.cfm?id=248106>
//!
//! Simon Doherty, Lindsay Groves, Victor Luchangco, and Mark Moir. 2004b. Formal Verification of a
//! Practical Lock-Free Queue Algorithm. <https://doi.org/10.1007/978-3-540-30232-2_7>
use core::mem::MaybeUninit;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crossbeam_utils::CachePadded;
use crate::{unprotected, Atomic, Guard, Owned, Shared};
// The representation here is a singly-linked list, with a sentinel node at the front. In general
// the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or
// all `Blocked` (requests for data from blocked threads).
#[derive(Debug)]
pub(crate) struct Queue<T> {
    // `head` and `tail` are cache-padded to avoid false sharing between
    // producers (touching `tail`) and consumers (touching `head`).
    head: CachePadded<Atomic<Node<T>>>,
    tail: CachePadded<Atomic<Node<T>>>,
}
/// A single link in the queue's singly-linked list.
struct Node<T> {
    /// The slot in which a value of type `T` can be stored.
    ///
    /// The type of `data` is `MaybeUninit<T>` because a `Node<T>` doesn't always contain a `T`.
    /// For example, the sentinel node in a queue never contains a value: its slot is always empty.
    /// Other nodes start their life with a push operation and contain a value until it gets popped
    /// out. After that such empty nodes get added to the collector for destruction.
    data: MaybeUninit<T>,
    next: Atomic<Node<T>>,
}
// Any particular `T` should never be accessed concurrently, so no need for `Sync`.
unsafe impl<T: Send> Sync for Queue<T> {}
unsafe impl<T: Send> Send for Queue<T> {}
impl<T> Queue<T> {
    /// Create a new, empty queue.
    pub(crate) fn new() -> Queue<T> {
        let q = Queue {
            head: CachePadded::new(Atomic::null()),
            tail: CachePadded::new(Atomic::null()),
        };
        // Both `head` and `tail` start out pointing at a shared sentinel
        // node, which never carries data.
        let sentinel = Owned::new(Node {
            data: MaybeUninit::uninit(),
            next: Atomic::null(),
        });
        unsafe {
            // No other thread can observe the queue yet, so an unprotected
            // guard and Relaxed stores suffice here.
            let guard = unprotected();
            let sentinel = sentinel.into_shared(guard);
            q.head.store(sentinel, Relaxed);
            q.tail.store(sentinel, Relaxed);
            q
        }
    }
    /// Attempts to atomically place `n` into the `next` pointer of `onto`, and returns `true` on
    /// success. The queue's `tail` pointer may be updated.
    #[inline(always)]
    fn push_internal(
        &self,
        onto: Shared<'_, Node<T>>,
        new: Shared<'_, Node<T>>,
        guard: &Guard,
    ) -> bool {
        // is `onto` the actual tail?
        let o = unsafe { onto.deref() };
        let next = o.next.load(Acquire, guard);
        if unsafe { next.as_ref().is_some() } {
            // if not, try to "help" by moving the tail pointer forward
            let _ = self
                .tail
                .compare_exchange(onto, next, Release, Relaxed, guard);
            false
        } else {
            // looks like the actual tail; attempt to link in `n`
            let result = o
                .next
                .compare_exchange(Shared::null(), new, Release, Relaxed, guard)
                .is_ok();
            if result {
                // try to move the tail pointer forward
                let _ = self
                    .tail
                    .compare_exchange(onto, new, Release, Relaxed, guard);
            }
            result
        }
    }
    /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`.
    pub(crate) fn push(&self, t: T, guard: &Guard) {
        let new = Owned::new(Node {
            data: MaybeUninit::new(t),
            next: Atomic::null(),
        });
        let new = Owned::into_shared(new, guard);
        loop {
            // We push onto the tail, so we'll start optimistically by looking there first.
            let tail = self.tail.load(Acquire, guard);
            // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed.
            if self.push_internal(tail, new, guard) {
                break;
            }
        }
    }
    /// Attempts to pop a data node. `Ok(None)` if queue is empty; `Err(())` if lost race to pop.
    #[inline(always)]
    fn pop_internal(&self, guard: &Guard) -> Result<Option<T>, ()> {
        let head = self.head.load(Acquire, guard);
        let h = unsafe { head.deref() };
        let next = h.next.load(Acquire, guard);
        match unsafe { next.as_ref() } {
            Some(n) => unsafe {
                // Swing `head` past the sentinel; the old sentinel is retired
                // and the popped node becomes the new sentinel.
                self.head
                    .compare_exchange(head, next, Release, Relaxed, guard)
                    .map(|_| {
                        let tail = self.tail.load(Relaxed, guard);
                        // Advance the tail so that we don't retire a pointer to a reachable node.
                        if head == tail {
                            let _ = self
                                .tail
                                .compare_exchange(tail, next, Release, Relaxed, guard);
                        }
                        guard.defer_destroy(head);
                        Some(n.data.assume_init_read())
                    })
                    .map_err(|_| ())
            },
            None => Ok(None),
        }
    }
    /// Attempts to pop a data node, if the data satisfies the given condition. `Ok(None)` if queue
    /// is empty or the data does not satisfy the condition; `Err(())` if lost race to pop.
    #[inline(always)]
    fn pop_if_internal<F>(&self, condition: F, guard: &Guard) -> Result<Option<T>, ()>
    where
        T: Sync,
        F: Fn(&T) -> bool,
    {
        let head = self.head.load(Acquire, guard);
        let h = unsafe { head.deref() };
        let next = h.next.load(Acquire, guard);
        match unsafe { next.as_ref() } {
            // The condition is checked before the CAS, i.e. while the node is
            // still reachable; only a matching node is actually popped.
            Some(n) if condition(unsafe { &*n.data.as_ptr() }) => unsafe {
                self.head
                    .compare_exchange(head, next, Release, Relaxed, guard)
                    .map(|_| {
                        let tail = self.tail.load(Relaxed, guard);
                        // Advance the tail so that we don't retire a pointer to a reachable node.
                        if head == tail {
                            let _ = self
                                .tail
                                .compare_exchange(tail, next, Release, Relaxed, guard);
                        }
                        guard.defer_destroy(head);
                        Some(n.data.assume_init_read())
                    })
                    .map_err(|_| ())
            },
            None | Some(_) => Ok(None),
        }
    }
    /// Attempts to dequeue from the front.
    ///
    /// Returns `None` if the queue is observed to be empty.
    pub(crate) fn try_pop(&self, guard: &Guard) -> Option<T> {
        loop {
            // Retry only on `Err(())`, i.e. when we lost a race with another
            // popper; `Ok(None)` (empty queue) is returned to the caller.
            if let Ok(head) = self.pop_internal(guard) {
                return head;
            }
        }
    }
    /// Attempts to dequeue from the front, if the item satisfies the given condition.
    ///
    /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the given
    /// condition.
    pub(crate) fn try_pop_if<F>(&self, condition: F, guard: &Guard) -> Option<T>
    where
        T: Sync,
        F: Fn(&T) -> bool,
    {
        loop {
            if let Ok(head) = self.pop_if_internal(&condition, guard) {
                return head;
            }
        }
    }
}
impl<T> Drop for Queue<T> {
    fn drop(&mut self) {
        unsafe {
            // `&mut self` guarantees exclusive access, so an unprotected
            // guard is sound here.
            let guard = unprotected();
            // Drain remaining elements so their payloads are dropped.
            while self.try_pop(guard).is_some() {}
            // Destroy the remaining sentinel node.
            let sentinel = self.head.load(Relaxed, guard);
            drop(sentinel.into_owned());
        }
    }
}
#[cfg(all(test, not(crossbeam_loom)))]
mod test {
use super::*;
use crate::pin;
use crossbeam_utils::thread;
/// Thin test wrapper around `super::Queue` that pins an epoch guard on every
/// call, so the tests below don't have to thread a `Guard` through manually.
struct Queue<T> {
    queue: super::Queue<T>,
}
impl<T> Queue<T> {
    pub(crate) fn new() -> Queue<T> {
        Queue {
            queue: super::Queue::new(),
        }
    }
    pub(crate) fn push(&self, t: T) {
        let guard = &pin();
        self.queue.push(t, guard);
    }
    /// The queue is empty iff the sentinel node has no successor.
    pub(crate) fn is_empty(&self) -> bool {
        let guard = &pin();
        let head = self.queue.head.load(Acquire, guard);
        let h = unsafe { head.deref() };
        h.next.load(Acquire, guard).is_null()
    }
    pub(crate) fn try_pop(&self) -> Option<T> {
        let guard = &pin();
        self.queue.try_pop(guard)
    }
    /// Blocking pop: spins until an element becomes available.
    pub(crate) fn pop(&self) -> T {
        loop {
            match self.try_pop() {
                None => continue,
                Some(t) => return t,
            }
        }
    }
}
// Use far fewer iterations under Miri, which executes much more slowly.
#[cfg(miri)]
const CONC_COUNT: i64 = 1000;
#[cfg(not(miri))]
const CONC_COUNT: i64 = 1000000;
/// Pushing a single value and popping it again must round-trip the value
/// and leave the queue empty.
#[test]
fn push_try_pop_1() {
    let queue: Queue<i64> = Queue::new();
    assert!(queue.is_empty());
    queue.push(37);
    assert!(!queue.is_empty());
    let popped = queue.try_pop();
    assert_eq!(popped, Some(37));
    assert!(queue.is_empty());
}
/// Two pushes must pop back in FIFO order.
#[test]
fn push_try_pop_2() {
    let q: Queue<i64> = Queue::new();
    assert!(q.is_empty());
    q.push(37);
    q.push(48);
    assert_eq!(q.try_pop(), Some(37));
    assert!(!q.is_empty());
    assert_eq!(q.try_pop(), Some(48));
    assert!(q.is_empty());
}
/// Pushes 200 values and pops them all back, checking FIFO order.
#[test]
fn push_try_pop_many_seq() {
    let q: Queue<i64> = Queue::new();
    assert!(q.is_empty());
    for i in 0..200 {
        q.push(i)
    }
    assert!(!q.is_empty());
    for i in 0..200 {
        assert_eq!(q.try_pop(), Some(i));
    }
    assert!(q.is_empty());
}
/// Same as `push_try_pop_1`, but via the blocking `pop`.
#[test]
fn push_pop_1() {
    let q: Queue<i64> = Queue::new();
    assert!(q.is_empty());
    q.push(37);
    assert!(!q.is_empty());
    assert_eq!(q.pop(), 37);
    assert!(q.is_empty());
}
/// Same as `push_try_pop_2`, but via the blocking `pop`.
#[test]
fn push_pop_2() {
    let q: Queue<i64> = Queue::new();
    q.push(37);
    q.push(48);
    assert_eq!(q.pop(), 37);
    assert_eq!(q.pop(), 48);
}
/// Same as `push_try_pop_many_seq`, but via the blocking `pop`.
#[test]
fn push_pop_many_seq() {
    let q: Queue<i64> = Queue::new();
    assert!(q.is_empty());
    for i in 0..200 {
        q.push(i)
    }
    assert!(!q.is_empty());
    for i in 0..200 {
        assert_eq!(q.pop(), i);
    }
    assert!(q.is_empty());
}
/// Single producer / single consumer: the consumer must observe values in
/// exactly the order they were pushed.
#[test]
fn push_try_pop_many_spsc() {
    let q: Queue<i64> = Queue::new();
    assert!(q.is_empty());
    thread::scope(|scope| {
        scope.spawn(|_| {
            let mut next = 0;
            while next < CONC_COUNT {
                if let Some(elem) = q.try_pop() {
                    // With a single producer, pops must arrive in push order.
                    assert_eq!(elem, next);
                    next += 1;
                }
            }
        });
        for i in 0..CONC_COUNT {
            q.push(i)
        }
    })
    .unwrap();
}
/// Single producer / multiple consumers: each consumer must observe a
/// strictly increasing subsequence of the pushed values.
#[test]
fn push_try_pop_many_spmc() {
    fn recv(_t: i32, q: &Queue<i64>) {
        let mut cur = -1;
        for _i in 0..CONC_COUNT {
            if let Some(elem) = q.try_pop() {
                // Values this consumer sees must be strictly increasing.
                assert!(elem > cur);
                cur = elem;
                // Stop once the last value has been observed.
                if cur == CONC_COUNT - 1 {
                    break;
                }
            }
        }
    }
    let q: Queue<i64> = Queue::new();
    assert!(q.is_empty());
    thread::scope(|scope| {
        for i in 0..3 {
            let q = &q;
            scope.spawn(move |_| recv(i, q));
        }
        scope.spawn(|_| {
            for i in 0..CONC_COUNT {
                q.push(i);
            }
        });
    })
    .unwrap();
}
/// Multiple producers / multiple consumers: each consumer must see the
/// left-tagged and right-tagged values in their respective push order.
#[test]
fn push_try_pop_many_mpmc() {
    enum LR {
        Left(i64),
        Right(i64),
    }
    let q: Queue<LR> = Queue::new();
    assert!(q.is_empty());
    thread::scope(|scope| {
        for _t in 0..2 {
            scope.spawn(|_| {
                // NOTE(review): this range contains a single value
                // (CONC_COUNT - 1), so each producer pushes exactly one
                // element — confirm this is intended rather than
                // `0..CONC_COUNT`.
                for i in CONC_COUNT - 1..CONC_COUNT {
                    q.push(LR::Left(i))
                }
            });
            scope.spawn(|_| {
                // NOTE(review): same single-value range as above.
                for i in CONC_COUNT - 1..CONC_COUNT {
                    q.push(LR::Right(i))
                }
            });
            scope.spawn(|_| {
                let mut vl = vec![];
                let mut vr = vec![];
                for _i in 0..CONC_COUNT {
                    match q.try_pop() {
                        Some(LR::Left(x)) => vl.push(x),
                        Some(LR::Right(x)) => vr.push(x),
                        _ => {}
                    }
                }
                // Within each tag, the observed order must already be sorted,
                // i.e. identical to a sorted copy.
                let mut vl2 = vl.clone();
                let mut vr2 = vr.clone();
                vl2.sort_unstable();
                vr2.sort_unstable();
                assert_eq!(vl, vl2);
                assert_eq!(vr, vr2);
            });
        }
    })
    .unwrap();
}
/// Like `push_try_pop_many_spsc`, but the consumer uses the blocking `pop`.
#[test]
fn push_pop_many_spsc() {
    let q: Queue<i64> = Queue::new();
    thread::scope(|scope| {
        scope.spawn(|_| {
            let mut next = 0;
            while next < CONC_COUNT {
                assert_eq!(q.pop(), next);
                next += 1;
            }
        });
        for i in 0..CONC_COUNT {
            q.push(i)
        }
    })
    .unwrap();
    assert!(q.is_empty());
}
/// `is_empty` must be a pure observation: calling it repeatedly must not
/// consume elements, so a subsequent pop still succeeds.
#[test]
fn is_empty_dont_pop() {
    let queue: Queue<i64> = Queue::new();
    for _ in 0..2 {
        queue.push(20);
    }
    assert!(!queue.is_empty());
    assert!(!queue.is_empty());
    assert!(queue.try_pop().is_some());
}
}

View file

@ -1,157 +0,0 @@
#![cfg(crossbeam_loom)]
use crossbeam_epoch as epoch;
use loom_crate as loom;
use epoch::*;
use epoch::{Atomic, Owned};
use loom::sync::atomic::Ordering::{self, Acquire, Relaxed, Release};
use loom::sync::Arc;
use loom::thread::spawn;
use std::mem::ManuallyDrop;
use std::ptr;
/// Models a deferred destruction racing with a reader under loom: because the
/// reader pins before the other thread's `defer` runs, the value must still
/// be alive when read.
#[test]
fn it_works() {
    loom::model(|| {
        let collector = Collector::new();
        let item: Atomic<String> = Atomic::from(Owned::new(String::from("boom")));
        let item2 = item.clone();
        let collector2 = collector.clone();
        let guard = collector.register().pin();
        let jh = loom::thread::spawn(move || {
            let guard = collector2.register().pin();
            guard.defer(move || {
                // this isn't really safe, since other threads may still have pointers to the
                // value, but in this limited test scenario it's okay, since we know the test won't
                // access item after all the pins are released.
                let mut item = unsafe { item2.into_owned() };
                // mutate it as a second measure to make sure the assert_eq below would fail
                item.retain(|c| c == 'o');
                drop(item);
            });
        });
        let item = item.load(Ordering::SeqCst, &guard);
        // we pinned strictly before the call to defer_destroy,
        // so item cannot have been dropped yet
        assert_eq!(*unsafe { item.deref() }, "boom");
        drop(guard);
        jh.join().unwrap();
        drop(collector);
    })
}
/// Exercises a Treiber stack built on the epoch API under the loom model
/// checker, with one spawned thread contending against the main thread.
#[test]
fn treiber_stack() {
    /// Treiber's lock-free stack.
    ///
    /// Usable with any number of producers and consumers.
    #[derive(Debug)]
    pub struct TreiberStack<T> {
        head: Atomic<Node<T>>,
    }
    #[derive(Debug)]
    struct Node<T> {
        data: ManuallyDrop<T>,
        next: Atomic<Node<T>>,
    }
    impl<T> TreiberStack<T> {
        /// Creates a new, empty stack.
        pub fn new() -> TreiberStack<T> {
            TreiberStack {
                head: Atomic::null(),
            }
        }
        /// Pushes a value on top of the stack.
        pub fn push(&self, t: T) {
            let mut n = Owned::new(Node {
                data: ManuallyDrop::new(t),
                next: Atomic::null(),
            });
            let guard = epoch::pin();
            loop {
                let head = self.head.load(Relaxed, &guard);
                n.next.store(head, Relaxed);
                match self
                    .head
                    .compare_exchange(head, n, Release, Relaxed, &guard)
                {
                    Ok(_) => break,
                    // On CAS failure we get ownership of the node back and retry.
                    Err(e) => n = e.new,
                }
            }
        }
        /// Attempts to pop the top element from the stack.
        ///
        /// Returns `None` if the stack is empty.
        pub fn pop(&self) -> Option<T> {
            let guard = epoch::pin();
            loop {
                let head = self.head.load(Acquire, &guard);
                match unsafe { head.as_ref() } {
                    Some(h) => {
                        let next = h.next.load(Relaxed, &guard);
                        if self
                            .head
                            .compare_exchange(head, next, Relaxed, Relaxed, &guard)
                            .is_ok()
                        {
                            unsafe {
                                // Defer freeing the node until no pinned thread
                                // can still reference it, then move the data out.
                                guard.defer_destroy(head);
                                return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data)));
                            }
                        }
                    }
                    None => return None,
                }
            }
        }
        /// Returns `true` if the stack is empty.
        pub fn is_empty(&self) -> bool {
            let guard = epoch::pin();
            self.head.load(Acquire, &guard).is_null()
        }
    }
    impl<T> Drop for TreiberStack<T> {
        fn drop(&mut self) {
            while self.pop().is_some() {}
        }
    }
    loom::model(|| {
        let stack1 = Arc::new(TreiberStack::new());
        let stack2 = Arc::clone(&stack1);
        // use 5 since it's greater than the 4 used for the sanitize feature
        let jh = spawn(move || {
            for i in 0..5 {
                stack2.push(i);
                assert!(stack2.pop().is_some());
            }
        });
        for i in 0..5 {
            stack1.push(i);
            assert!(stack1.pop().is_some());
        }
        jh.join().unwrap();
        // Each push was matched by a successful pop, so the stack ends empty.
        assert!(stack1.pop().is_none());
        assert!(stack1.is_empty());
    });
}

View file

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"2c10a9c588d28ae9edabaaf50d224d523790080cbf82783143e170395aed2a35","Cargo.toml":"8435b3df4a6f66aa2860e874b1f98f074535b6ad9b92708ed90ccb3410f7479c","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"88a50c7a414f6d8431061f0741dc6b46db012ec338b4e57d5d3e9746eeaaa543","src/array_queue.rs":"9c3360280522ae0ea3640bf8a3e454038860a6891a5fa11ef6c0237a45fb2963","src/lib.rs":"87d7a57fab9f20e088bb0612b10ee9499c2512d714b5453ab34c0400a3243ea0","src/seg_queue.rs":"f97a024d8f2a4dad332b5ff371c81a1b3750caf25cc67ae5d61ee789509b55b1","tests/array_queue.rs":"426dd0ff6698bd63108b3a567703ec2e635bce0f337134116e237b11925a7716","tests/seg_queue.rs":"7abb1008638a947440b201e1ad15f273020730715fae1f876407f1b967ae28ff"},"package":"df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35"}

View file

@ -1,84 +0,0 @@
# Version 0.3.11
- Remove dependency on `cfg-if`. (#1072)
# Version 0.3.10
- Relax the minimum supported Rust version to 1.60. (#1056)
- Implement `UnwindSafe` and `RefUnwindSafe` for `ArrayQueue` and `SegQueue`. (#1053)
- Optimize `Drop` implementation of `ArrayQueue`. (#1057)
# Version 0.3.9
- Bump the minimum supported Rust version to 1.61. (#1037)
- Improve support for targets without atomic CAS. (#1037)
- Remove build script. (#1037)
# Version 0.3.8
- Fix build script bug introduced in 0.3.7. (#932)
# Version 0.3.7
**Note:** This release has been yanked due to regression fixed in 0.3.8.
- Improve support for custom targets. (#922)
# Version 0.3.6
- Bump the minimum supported Rust version to 1.38. (#877)
# Version 0.3.5
- Add `ArrayQueue::force_push`. (#789)
# Version 0.3.4
- Implement `IntoIterator` for `ArrayQueue` and `SegQueue`. (#772)
# Version 0.3.3
- Fix stacked borrows violation in `ArrayQueue` when `-Zmiri-tag-raw-pointers` is enabled. (#763)
# Version 0.3.2
- Support targets that do not have atomic CAS on stable Rust. (#698)
# Version 0.3.1
- Make `SegQueue::new` const fn. (#584)
- Change license to "MIT OR Apache-2.0".
# Version 0.3.0
- Bump the minimum supported Rust version to 1.36.
- Remove `PushError` and `PopError`.
# Version 0.2.3
- Fix bug in release (yanking 0.2.2)
# Version 0.2.2
- Fix unsoundness issues by adopting `MaybeUninit`. (#458)
# Version 0.2.1
- Add `no_std` support.
# Version 0.2.0
- Bump the minimum required version to 1.28.
- Bump `crossbeam-utils` to `0.7`.
# Version 0.1.2
- Update `crossbeam-utils` to `0.6.5`.
# Version 0.1.1
- Update `crossbeam-utils` to `0.6.4`.
# Version 0.1.0
- Initial version with `ArrayQueue` and `SegQueue`.

View file

@ -1,49 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.60"
name = "crossbeam-queue"
version = "0.3.11"
description = "Concurrent queues"
homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-queue"
readme = "README.md"
keywords = [
"queue",
"mpmc",
"lock-free",
"producer",
"consumer",
]
categories = [
"concurrency",
"data-structures",
"no-std",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/crossbeam-rs/crossbeam"
[dependencies.crossbeam-utils]
version = "0.8.18"
default-features = false
[dev-dependencies.rand]
version = "0.8"
[features]
alloc = []
default = ["std"]
nightly = ["crossbeam-utils/nightly"]
std = [
"alloc",
"crossbeam-utils/std",
]

View file

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,27 +0,0 @@
The MIT License (MIT)
Copyright (c) 2019 The Crossbeam Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View file

@ -1,54 +0,0 @@
# Crossbeam Queue
[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)](
https://github.com/crossbeam-rs/crossbeam/actions)
[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)](
https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-queue#license)
[![Cargo](https://img.shields.io/crates/v/crossbeam-queue.svg)](
https://crates.io/crates/crossbeam-queue)
[![Documentation](https://docs.rs/crossbeam-queue/badge.svg)](
https://docs.rs/crossbeam-queue)
[![Rust 1.60+](https://img.shields.io/badge/rust-1.60+-lightgray.svg)](
https://www.rust-lang.org)
[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ)
This crate provides concurrent queues that can be shared among threads:
* [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction.
* [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand.
Everything in this crate can be used in `no_std` environments, provided that the `alloc` feature is
enabled.
[`ArrayQueue`]: https://docs.rs/crossbeam-queue/*/crossbeam_queue/struct.ArrayQueue.html
[`SegQueue`]: https://docs.rs/crossbeam-queue/*/crossbeam_queue/struct.SegQueue.html
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam-queue = "0.3"
```
## Compatibility
Crossbeam Queue supports stable Rust releases going back at least six months,
and every time the minimum supported Rust version is increased, a new minor
version is released. Currently, the minimum supported Rust version is 1.60.
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

View file

@ -1,541 +0,0 @@
//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
//!
//! Source:
//! - <http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue>
use alloc::boxed::Box;
use core::cell::UnsafeCell;
use core::fmt;
use core::mem::{self, MaybeUninit};
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::sync::atomic::{self, AtomicUsize, Ordering};
use crossbeam_utils::{Backoff, CachePadded};
/// A slot in a queue.
struct Slot<T> {
    /// The current stamp.
    ///
    /// If the stamp equals the tail, this node will be next written to. If it equals head + 1,
    /// this node will be next read from.
    stamp: AtomicUsize,

    /// The value in this slot.
    ///
    /// Only initialized while the stamp indicates the slot holds a message.
    value: UnsafeCell<MaybeUninit<T>>,
}
/// A bounded multi-producer multi-consumer queue.
///
/// This queue allocates a fixed-capacity buffer on construction, which is used to store pushed
/// elements. The queue cannot hold more elements than the buffer allows. Attempting to push an
/// element into a full queue will fail. Alternatively, [`force_push`] makes it possible for
/// this queue to be used as a ring-buffer. Having a buffer allocated upfront makes this queue
/// a bit faster than [`SegQueue`].
///
/// [`force_push`]: ArrayQueue::force_push
/// [`SegQueue`]: super::SegQueue
///
/// # Examples
///
/// ```
/// use crossbeam_queue::ArrayQueue;
///
/// let q = ArrayQueue::new(2);
///
/// assert_eq!(q.push('a'), Ok(()));
/// assert_eq!(q.push('b'), Ok(()));
/// assert_eq!(q.push('c'), Err('c'));
/// assert_eq!(q.pop(), Some('a'));
/// ```
pub struct ArrayQueue<T> {
    /// The head of the queue.
    ///
    /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a
    /// single `usize`. The lower bits represent the index, while the upper bits represent the lap.
    ///
    /// Elements are popped from the head of the queue.
    head: CachePadded<AtomicUsize>,

    /// The tail of the queue.
    ///
    /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a
    /// single `usize`. The lower bits represent the index, while the upper bits represent the lap.
    ///
    /// Elements are pushed into the tail of the queue.
    tail: CachePadded<AtomicUsize>,

    /// The buffer holding slots.
    buffer: Box<[Slot<T>]>,

    /// The queue capacity. Invariant: non-zero (enforced by `new`).
    cap: usize,

    /// A stamp with the value of `{ lap: 1, index: 0 }`
    /// (the smallest power of two strictly greater than `cap`).
    one_lap: usize,
}
// SAFETY: the queue owns its `T` values and all access to slot contents is
// gated by the atomic stamp protocol, so sharing or sending the queue across
// threads is sound whenever `T` itself may be sent between threads.
unsafe impl<T: Send> Sync for ArrayQueue<T> {}
unsafe impl<T: Send> Send for ArrayQueue<T> {}

// The queue holds no state that could be observed in a broken form after a
// panic, so it is unwind-safe on both sides of a reference.
impl<T> UnwindSafe for ArrayQueue<T> {}
impl<T> RefUnwindSafe for ArrayQueue<T> {}
impl<T> ArrayQueue<T> {
    /// Creates a new bounded queue with the given capacity.
    ///
    /// # Panics
    ///
    /// Panics if the capacity is zero.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::ArrayQueue;
    ///
    /// let q = ArrayQueue::<i32>::new(100);
    /// ```
    pub fn new(cap: usize) -> ArrayQueue<T> {
        assert!(cap > 0, "capacity must be non-zero");

        // Head is initialized to `{ lap: 0, index: 0 }`.
        // Tail is initialized to `{ lap: 0, index: 0 }`.
        let head = 0;
        let tail = 0;

        // Allocate a buffer of `cap` slots initialized
        // with stamps.
        let buffer: Box<[Slot<T>]> = (0..cap)
            .map(|i| {
                // Set the stamp to `{ lap: 0, index: i }`.
                Slot {
                    stamp: AtomicUsize::new(i),
                    value: UnsafeCell::new(MaybeUninit::uninit()),
                }
            })
            .collect();

        // One lap is the smallest power of two greater than `cap`.
        let one_lap = (cap + 1).next_power_of_two();

        ArrayQueue {
            buffer,
            cap,
            one_lap,
            head: CachePadded::new(AtomicUsize::new(head)),
            tail: CachePadded::new(AtomicUsize::new(tail)),
        }
    }

    /// Common implementation of `push` and `force_push`.
    ///
    /// `f` is invoked when the queue appears full (the slot's stamp lags the tail by one
    /// lap). It receives the value plus the observed tail, the prospective new tail, and
    /// the slot; it returns `Ok(value)` to keep retrying or `Err(value)` to abort the push.
    fn push_or_else<F>(&self, mut value: T, f: F) -> Result<(), T>
    where
        F: Fn(T, usize, usize, &Slot<T>) -> Result<T, T>,
    {
        let backoff = Backoff::new();
        let mut tail = self.tail.load(Ordering::Relaxed);

        loop {
            // Deconstruct the tail.
            let index = tail & (self.one_lap - 1);
            let lap = tail & !(self.one_lap - 1);

            let new_tail = if index + 1 < self.cap {
                // Same lap, incremented index.
                // Set to `{ lap: lap, index: index + 1 }`.
                tail + 1
            } else {
                // One lap forward, index wraps around to zero.
                // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
                lap.wrapping_add(self.one_lap)
            };

            // Inspect the corresponding slot.
            debug_assert!(index < self.buffer.len());
            let slot = unsafe { self.buffer.get_unchecked(index) };
            let stamp = slot.stamp.load(Ordering::Acquire);

            // If the tail and the stamp match, we may attempt to push.
            if tail == stamp {
                // Try moving the tail.
                match self.tail.compare_exchange_weak(
                    tail,
                    new_tail,
                    Ordering::SeqCst,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => {
                        // Write the value into the slot and update the stamp.
                        unsafe {
                            slot.value.get().write(MaybeUninit::new(value));
                        }
                        slot.stamp.store(tail + 1, Ordering::Release);
                        return Ok(());
                    }
                    Err(t) => {
                        tail = t;
                        backoff.spin();
                    }
                }
            } else if stamp.wrapping_add(self.one_lap) == tail + 1 {
                // The slot is a full lap behind the tail — the queue looks full;
                // let the caller-supplied policy decide whether to retry or abort.
                atomic::fence(Ordering::SeqCst);
                value = f(value, tail, new_tail, slot)?;
                backoff.spin();
                tail = self.tail.load(Ordering::Relaxed);
            } else {
                // Snooze because we need to wait for the stamp to get updated.
                backoff.snooze();
                tail = self.tail.load(Ordering::Relaxed);
            }
        }
    }

    /// Attempts to push an element into the queue.
    ///
    /// If the queue is full, the element is returned back as an error.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::ArrayQueue;
    ///
    /// let q = ArrayQueue::new(1);
    ///
    /// assert_eq!(q.push(10), Ok(()));
    /// assert_eq!(q.push(20), Err(20));
    /// ```
    pub fn push(&self, value: T) -> Result<(), T> {
        self.push_or_else(value, |v, tail, _, _| {
            let head = self.head.load(Ordering::Relaxed);

            // If the head lags one lap behind the tail as well...
            if head.wrapping_add(self.one_lap) == tail {
                // ...then the queue is full.
                Err(v)
            } else {
                Ok(v)
            }
        })
    }

    /// Pushes an element into the queue, replacing the oldest element if necessary.
    ///
    /// If the queue is full, the oldest element is replaced and returned,
    /// otherwise `None` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::ArrayQueue;
    ///
    /// let q = ArrayQueue::new(2);
    ///
    /// assert_eq!(q.force_push(10), None);
    /// assert_eq!(q.force_push(20), None);
    /// assert_eq!(q.force_push(30), Some(10));
    /// assert_eq!(q.pop(), Some(20));
    /// ```
    pub fn force_push(&self, value: T) -> Option<T> {
        self.push_or_else(value, |v, tail, new_tail, slot| {
            let head = tail.wrapping_sub(self.one_lap);
            let new_head = new_tail.wrapping_sub(self.one_lap);

            // Try moving the head.
            if self
                .head
                .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Relaxed)
                .is_ok()
            {
                // Move the tail.
                self.tail.store(new_tail, Ordering::SeqCst);

                // Swap the previous value.
                let old = unsafe { slot.value.get().replace(MaybeUninit::new(v)).assume_init() };

                // Update the stamp.
                slot.stamp.store(tail + 1, Ordering::Release);

                // `Err` stops the retry loop; `.err()` below turns the
                // displaced value into the `Some(old)` return.
                Err(old)
            } else {
                Ok(v)
            }
        })
        .err()
    }

    /// Attempts to pop an element from the queue.
    ///
    /// If the queue is empty, `None` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::ArrayQueue;
    ///
    /// let q = ArrayQueue::new(1);
    /// assert_eq!(q.push(10), Ok(()));
    ///
    /// assert_eq!(q.pop(), Some(10));
    /// assert!(q.pop().is_none());
    /// ```
    pub fn pop(&self) -> Option<T> {
        let backoff = Backoff::new();
        let mut head = self.head.load(Ordering::Relaxed);

        loop {
            // Deconstruct the head.
            let index = head & (self.one_lap - 1);
            let lap = head & !(self.one_lap - 1);

            // Inspect the corresponding slot.
            debug_assert!(index < self.buffer.len());
            let slot = unsafe { self.buffer.get_unchecked(index) };
            let stamp = slot.stamp.load(Ordering::Acquire);

            // If the stamp is ahead of the head by 1, we may attempt to pop.
            if head + 1 == stamp {
                let new = if index + 1 < self.cap {
                    // Same lap, incremented index.
                    // Set to `{ lap: lap, index: index + 1 }`.
                    head + 1
                } else {
                    // One lap forward, index wraps around to zero.
                    // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
                    lap.wrapping_add(self.one_lap)
                };

                // Try moving the head.
                match self.head.compare_exchange_weak(
                    head,
                    new,
                    Ordering::SeqCst,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => {
                        // Read the value from the slot and update the stamp.
                        let msg = unsafe { slot.value.get().read().assume_init() };
                        slot.stamp
                            .store(head.wrapping_add(self.one_lap), Ordering::Release);
                        return Some(msg);
                    }
                    Err(h) => {
                        head = h;
                        backoff.spin();
                    }
                }
            } else if stamp == head {
                atomic::fence(Ordering::SeqCst);
                let tail = self.tail.load(Ordering::Relaxed);

                // If the tail equals the head, that means the channel is empty.
                if tail == head {
                    return None;
                }

                backoff.spin();
                head = self.head.load(Ordering::Relaxed);
            } else {
                // Snooze because we need to wait for the stamp to get updated.
                backoff.snooze();
                head = self.head.load(Ordering::Relaxed);
            }
        }
    }

    /// Returns the capacity of the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::ArrayQueue;
    ///
    /// let q = ArrayQueue::<i32>::new(100);
    ///
    /// assert_eq!(q.capacity(), 100);
    /// ```
    pub fn capacity(&self) -> usize {
        self.cap
    }

    /// Returns `true` if the queue is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::ArrayQueue;
    ///
    /// let q = ArrayQueue::new(100);
    ///
    /// assert!(q.is_empty());
    /// q.push(1).unwrap();
    /// assert!(!q.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        let head = self.head.load(Ordering::SeqCst);
        let tail = self.tail.load(Ordering::SeqCst);

        // Is the tail lagging one lap behind head?
        // Is the tail equal to the head?
        //
        // Note: If the head changes just before we load the tail, that means there was a moment
        // when the channel was not empty, so it is safe to just return `false`.
        tail == head
    }

    /// Returns `true` if the queue is full.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::ArrayQueue;
    ///
    /// let q = ArrayQueue::new(1);
    ///
    /// assert!(!q.is_full());
    /// q.push(1).unwrap();
    /// assert!(q.is_full());
    /// ```
    pub fn is_full(&self) -> bool {
        let tail = self.tail.load(Ordering::SeqCst);
        let head = self.head.load(Ordering::SeqCst);

        // Is the head lagging one lap behind tail?
        //
        // Note: If the tail changes just before we load the head, that means there was a moment
        // when the queue was not full, so it is safe to just return `false`.
        head.wrapping_add(self.one_lap) == tail
    }

    /// Returns the number of elements in the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::ArrayQueue;
    ///
    /// let q = ArrayQueue::new(100);
    /// assert_eq!(q.len(), 0);
    ///
    /// q.push(10).unwrap();
    /// assert_eq!(q.len(), 1);
    ///
    /// q.push(20).unwrap();
    /// assert_eq!(q.len(), 2);
    /// ```
    pub fn len(&self) -> usize {
        loop {
            // Load the tail, then load the head.
            let tail = self.tail.load(Ordering::SeqCst);
            let head = self.head.load(Ordering::SeqCst);

            // If the tail didn't change, we've got consistent values to work with.
            if self.tail.load(Ordering::SeqCst) == tail {
                let hix = head & (self.one_lap - 1);
                let tix = tail & (self.one_lap - 1);

                return if hix < tix {
                    tix - hix
                } else if hix > tix {
                    self.cap - hix + tix
                } else if tail == head {
                    0
                } else {
                    self.cap
                };
            }
        }
    }
}
impl<T> Drop for ArrayQueue<T> {
    // Drops every value still stored between head and tail. `&mut self`
    // guarantees exclusive access, so plain `get_mut` reads are sufficient.
    fn drop(&mut self) {
        if mem::needs_drop::<T>() {
            // Get the index of the head.
            let head = *self.head.get_mut();
            let tail = *self.tail.get_mut();

            let hix = head & (self.one_lap - 1);
            let tix = tail & (self.one_lap - 1);

            // Same length computation as `len`, but without the consistency loop.
            let len = if hix < tix {
                tix - hix
            } else if hix > tix {
                self.cap - hix + tix
            } else if tail == head {
                0
            } else {
                self.cap
            };

            // Loop over all slots that hold a message and drop them.
            for i in 0..len {
                // Compute the index of the next slot holding a message.
                let index = if hix + i < self.cap {
                    hix + i
                } else {
                    hix + i - self.cap
                };

                unsafe {
                    debug_assert!(index < self.buffer.len());
                    let slot = self.buffer.get_unchecked_mut(index);
                    (*slot.value.get()).assume_init_drop();
                }
            }
        }
    }
}
impl<T> fmt::Debug for ArrayQueue<T> {
    /// Formats an opaque placeholder; the queue's elements are not shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let placeholder = "ArrayQueue { .. }";
        f.pad(placeholder)
    }
}
impl<T> IntoIterator for ArrayQueue<T> {
    type Item = T;

    type IntoIter = IntoIter<T>;

    // Consumes the queue, yielding its remaining elements in FIFO order.
    fn into_iter(self) -> Self::IntoIter {
        IntoIter { value: self }
    }
}
// A consuming iterator over the remaining elements of an `ArrayQueue`.
#[derive(Debug)]
pub struct IntoIter<T> {
    value: ArrayQueue<T>,
}
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    // Pops the element at `head` (if any) and advances the head stamp,
    // mirroring `ArrayQueue::pop` but without any synchronization.
    fn next(&mut self) -> Option<Self::Item> {
        let value = &mut self.value;
        let head = *value.head.get_mut();
        if value.head.get_mut() != value.tail.get_mut() {
            let index = head & (value.one_lap - 1);
            let lap = head & !(value.one_lap - 1);
            // SAFETY: We have mutable access to this, so we can read without
            // worrying about concurrency. Furthermore, we know this is
            // initialized because it is the value pointed at by `value.head`
            // and this is a non-empty queue.
            let val = unsafe {
                debug_assert!(index < value.buffer.len());
                let slot = value.buffer.get_unchecked_mut(index);
                slot.value.get().read().assume_init()
            };
            let new = if index + 1 < value.cap {
                // Same lap, incremented index.
                // Set to `{ lap: lap, index: index + 1 }`.
                head + 1
            } else {
                // One lap forward, index wraps around to zero.
                // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.
                lap.wrapping_add(value.one_lap)
            };
            *value.head.get_mut() = new;
            Option::Some(val)
        } else {
            Option::None
        }
    }
}

View file

@ -1,32 +0,0 @@
//! Concurrent queues.
//!
//! This crate provides concurrent queues that can be shared among threads:
//!
//! * [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction.
//! * [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand.

#![doc(test(
    no_crate_inject,
    attr(
        deny(warnings, rust_2018_idioms),
        allow(dead_code, unused_assignments, unused_variables)
    )
))]
#![warn(
    missing_docs,
    missing_debug_implementations,
    rust_2018_idioms,
    unreachable_pub
)]
#![cfg_attr(not(feature = "std"), no_std)]

// The queue implementations need `alloc` and atomic pointer support; on
// targets without either, the crate compiles to an empty shell.
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
extern crate alloc;

#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod array_queue;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod seg_queue;

#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
pub use crate::{array_queue::ArrayQueue, seg_queue::SegQueue};

View file

@ -1,549 +0,0 @@
use alloc::boxed::Box;
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::ptr;
use core::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use crossbeam_utils::{Backoff, CachePadded};
// Bits indicating the state of a slot:
// * If a value has been written into the slot, `WRITE` is set.
// * If a value has been read from the slot, `READ` is set.
// * If the block is being destroyed, `DESTROY` is set.
const WRITE: usize = 1;
const READ: usize = 2;
const DESTROY: usize = 4;

// Each block covers one "lap" of indices.
const LAP: usize = 32;
// The maximum number of values a block can hold.
const BLOCK_CAP: usize = LAP - 1;
// How many lower bits are reserved for metadata.
const SHIFT: usize = 1;
// Indicates that the block is not the last one.
const HAS_NEXT: usize = 1;
/// A slot in a block.
struct Slot<T> {
    /// The value; only initialized once `WRITE` is set in `state`.
    value: UnsafeCell<MaybeUninit<T>>,

    /// The state of the slot (a combination of `WRITE`, `READ`, `DESTROY`).
    state: AtomicUsize,
}
impl<T> Slot<T> {
    // A fresh, empty slot, used to initialize a block's slot array.
    const UNINIT: Self = Self {
        value: UnsafeCell::new(MaybeUninit::uninit()),
        state: AtomicUsize::new(0),
    };

    /// Waits until a value is written into the slot.
    fn wait_write(&self) {
        let backoff = Backoff::new();
        while self.state.load(Ordering::Acquire) & WRITE == 0 {
            backoff.snooze();
        }
    }
}
/// A block in a linked list.
///
/// Each block in the list can hold up to `BLOCK_CAP` values.
struct Block<T> {
    /// The next block in the linked list; null until installed by a pusher.
    next: AtomicPtr<Block<T>>,

    /// Slots for values.
    slots: [Slot<T>; BLOCK_CAP],
}
impl<T> Block<T> {
    /// Creates an empty block.
    fn new() -> Block<T> {
        Self {
            next: AtomicPtr::new(ptr::null_mut()),
            slots: [Slot::UNINIT; BLOCK_CAP],
        }
    }

    /// Waits until the next pointer is set.
    fn wait_next(&self) -> *mut Block<T> {
        let backoff = Backoff::new();
        loop {
            let next = self.next.load(Ordering::Acquire);
            if !next.is_null() {
                return next;
            }
            backoff.snooze();
        }
    }

    /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
    unsafe fn destroy(this: *mut Block<T>, start: usize) {
        // It is not necessary to set the `DESTROY` bit in the last slot because that slot has
        // begun destruction of the block.
        for i in start..BLOCK_CAP - 1 {
            let slot = (*this).slots.get_unchecked(i);

            // Mark the `DESTROY` bit if a thread is still using the slot.
            if slot.state.load(Ordering::Acquire) & READ == 0
                && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
            {
                // If a thread is still using the slot, it will continue destruction of the block.
                return;
            }
        }

        // No thread is using the block, now it is safe to destroy it.
        drop(Box::from_raw(this));
    }
}
/// A position in a queue.
struct Position<T> {
    /// The index in the queue (shifted left by `SHIFT`; low bits hold metadata).
    index: AtomicUsize,

    /// The block in the linked list.
    block: AtomicPtr<Block<T>>,
}
/// An unbounded multi-producer multi-consumer queue.
///
/// This queue is implemented as a linked list of segments, where each segment is a small buffer
/// that can hold a handful of elements. There is no limit to how many elements can be in the queue
/// at a time. However, since segments need to be dynamically allocated as elements get pushed,
/// this queue is somewhat slower than [`ArrayQueue`].
///
/// [`ArrayQueue`]: super::ArrayQueue
///
/// # Examples
///
/// ```
/// use crossbeam_queue::SegQueue;
///
/// let q = SegQueue::new();
///
/// q.push('a');
/// q.push('b');
///
/// assert_eq!(q.pop(), Some('a'));
/// assert_eq!(q.pop(), Some('b'));
/// assert!(q.pop().is_none());
/// ```
pub struct SegQueue<T> {
    /// The head of the queue.
    head: CachePadded<Position<T>>,

    /// The tail of the queue.
    tail: CachePadded<Position<T>>,

    /// Indicates that dropping a `SegQueue<T>` may drop values of type `T`.
    _marker: PhantomData<T>,
}
// SAFETY: the queue owns its `T` values and coordinates all slot access
// through atomic state bits, so it may be sent/shared whenever `T: Send`.
unsafe impl<T: Send> Send for SegQueue<T> {}
unsafe impl<T: Send> Sync for SegQueue<T> {}

// No state can be observed half-updated after a panic, so the queue is
// unwind-safe on both sides of a reference.
impl<T> UnwindSafe for SegQueue<T> {}
impl<T> RefUnwindSafe for SegQueue<T> {}
impl<T> SegQueue<T> {
    /// Creates a new unbounded queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::<i32>::new();
    /// ```
    pub const fn new() -> SegQueue<T> {
        SegQueue {
            head: CachePadded::new(Position {
                block: AtomicPtr::new(ptr::null_mut()),
                index: AtomicUsize::new(0),
            }),
            tail: CachePadded::new(Position {
                block: AtomicPtr::new(ptr::null_mut()),
                index: AtomicUsize::new(0),
            }),
            _marker: PhantomData,
        }
    }

    /// Pushes an element into the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::new();
    ///
    /// q.push(10);
    /// q.push(20);
    /// ```
    pub fn push(&self, value: T) {
        let backoff = Backoff::new();
        let mut tail = self.tail.index.load(Ordering::Acquire);
        let mut block = self.tail.block.load(Ordering::Acquire);
        let mut next_block = None;

        loop {
            // Calculate the offset of the index into the block.
            let offset = (tail >> SHIFT) % LAP;

            // If we reached the end of the block, wait until the next one is installed.
            if offset == BLOCK_CAP {
                backoff.snooze();
                tail = self.tail.index.load(Ordering::Acquire);
                block = self.tail.block.load(Ordering::Acquire);
                continue;
            }

            // If we're going to have to install the next block, allocate it in advance in order to
            // make the wait for other threads as short as possible.
            if offset + 1 == BLOCK_CAP && next_block.is_none() {
                next_block = Some(Box::new(Block::<T>::new()));
            }

            // If this is the first push operation, we need to allocate the first block.
            if block.is_null() {
                let new = Box::into_raw(Box::new(Block::<T>::new()));

                if self
                    .tail
                    .block
                    .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed)
                    .is_ok()
                {
                    self.head.block.store(new, Ordering::Release);
                    block = new;
                } else {
                    // Another thread installed the first block; reuse ours as
                    // the pre-allocated `next_block` and retry.
                    next_block = unsafe { Some(Box::from_raw(new)) };
                    tail = self.tail.index.load(Ordering::Acquire);
                    block = self.tail.block.load(Ordering::Acquire);
                    continue;
                }
            }

            let new_tail = tail + (1 << SHIFT);

            // Try advancing the tail forward.
            match self.tail.index.compare_exchange_weak(
                tail,
                new_tail,
                Ordering::SeqCst,
                Ordering::Acquire,
            ) {
                Ok(_) => unsafe {
                    // If we've reached the end of the block, install the next one.
                    if offset + 1 == BLOCK_CAP {
                        let next_block = Box::into_raw(next_block.unwrap());
                        let next_index = new_tail.wrapping_add(1 << SHIFT);

                        self.tail.block.store(next_block, Ordering::Release);
                        self.tail.index.store(next_index, Ordering::Release);
                        (*block).next.store(next_block, Ordering::Release);
                    }

                    // Write the value into the slot.
                    let slot = (*block).slots.get_unchecked(offset);
                    slot.value.get().write(MaybeUninit::new(value));
                    slot.state.fetch_or(WRITE, Ordering::Release);

                    return;
                },
                Err(t) => {
                    tail = t;
                    block = self.tail.block.load(Ordering::Acquire);
                    backoff.spin();
                }
            }
        }
    }

    /// Pops an element from the queue.
    ///
    /// If the queue is empty, `None` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::new();
    ///
    /// q.push(10);
    /// assert_eq!(q.pop(), Some(10));
    /// assert!(q.pop().is_none());
    /// ```
    pub fn pop(&self) -> Option<T> {
        let backoff = Backoff::new();
        let mut head = self.head.index.load(Ordering::Acquire);
        let mut block = self.head.block.load(Ordering::Acquire);

        loop {
            // Calculate the offset of the index into the block.
            let offset = (head >> SHIFT) % LAP;

            // If we reached the end of the block, wait until the next one is installed.
            if offset == BLOCK_CAP {
                backoff.snooze();
                head = self.head.index.load(Ordering::Acquire);
                block = self.head.block.load(Ordering::Acquire);
                continue;
            }

            let mut new_head = head + (1 << SHIFT);

            if new_head & HAS_NEXT == 0 {
                atomic::fence(Ordering::SeqCst);
                let tail = self.tail.index.load(Ordering::Relaxed);

                // If the tail equals the head, that means the queue is empty.
                if head >> SHIFT == tail >> SHIFT {
                    return None;
                }

                // If head and tail are not in the same block, set `HAS_NEXT` in head.
                if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
                    new_head |= HAS_NEXT;
                }
            }

            // The block can be null here only if the first push operation is in progress. In that
            // case, just wait until it gets initialized.
            if block.is_null() {
                backoff.snooze();
                head = self.head.index.load(Ordering::Acquire);
                block = self.head.block.load(Ordering::Acquire);
                continue;
            }

            // Try moving the head index forward.
            match self.head.index.compare_exchange_weak(
                head,
                new_head,
                Ordering::SeqCst,
                Ordering::Acquire,
            ) {
                Ok(_) => unsafe {
                    // If we've reached the end of the block, move to the next one.
                    if offset + 1 == BLOCK_CAP {
                        let next = (*block).wait_next();
                        let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT);
                        if !(*next).next.load(Ordering::Relaxed).is_null() {
                            next_index |= HAS_NEXT;
                        }

                        self.head.block.store(next, Ordering::Release);
                        self.head.index.store(next_index, Ordering::Release);
                    }

                    // Read the value.
                    let slot = (*block).slots.get_unchecked(offset);
                    slot.wait_write();
                    let value = slot.value.get().read().assume_init();

                    // Destroy the block if we've reached the end, or if another thread wanted to
                    // destroy but couldn't because we were busy reading from the slot.
                    if offset + 1 == BLOCK_CAP {
                        Block::destroy(block, 0);
                    } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
                        Block::destroy(block, offset + 1);
                    }

                    return Some(value);
                },
                Err(h) => {
                    head = h;
                    block = self.head.block.load(Ordering::Acquire);
                    backoff.spin();
                }
            }
        }
    }

    /// Returns `true` if the queue is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::new();
    ///
    /// assert!(q.is_empty());
    /// q.push(1);
    /// assert!(!q.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        let head = self.head.index.load(Ordering::SeqCst);
        let tail = self.tail.index.load(Ordering::SeqCst);
        head >> SHIFT == tail >> SHIFT
    }

    /// Returns the number of elements in the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_queue::SegQueue;
    ///
    /// let q = SegQueue::new();
    /// assert_eq!(q.len(), 0);
    ///
    /// q.push(10);
    /// assert_eq!(q.len(), 1);
    ///
    /// q.push(20);
    /// assert_eq!(q.len(), 2);
    /// ```
    pub fn len(&self) -> usize {
        loop {
            // Load the tail index, then load the head index.
            let mut tail = self.tail.index.load(Ordering::SeqCst);
            let mut head = self.head.index.load(Ordering::SeqCst);

            // If the tail index didn't change, we've got consistent indices to work with.
            if self.tail.index.load(Ordering::SeqCst) == tail {
                // Erase the lower bits.
                tail &= !((1 << SHIFT) - 1);
                head &= !((1 << SHIFT) - 1);

                // Fix up indices if they fall onto block ends.
                if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {
                    tail = tail.wrapping_add(1 << SHIFT);
                }
                if (head >> SHIFT) & (LAP - 1) == LAP - 1 {
                    head = head.wrapping_add(1 << SHIFT);
                }

                // Rotate indices so that head falls into the first block.
                let lap = (head >> SHIFT) / LAP;
                tail = tail.wrapping_sub((lap * LAP) << SHIFT);
                head = head.wrapping_sub((lap * LAP) << SHIFT);

                // Remove the lower bits.
                tail >>= SHIFT;
                head >>= SHIFT;

                // Return the difference minus the number of blocks between tail and head.
                return tail - head - tail / LAP;
            }
        }
    }
}
impl<T> Drop for SegQueue<T> {
    // Drops all remaining values and frees every block. `&mut self`
    // guarantees exclusive access, so plain `get_mut` reads are sufficient.
    fn drop(&mut self) {
        let mut head = *self.head.index.get_mut();
        let mut tail = *self.tail.index.get_mut();
        let mut block = *self.head.block.get_mut();

        // Erase the lower bits.
        head &= !((1 << SHIFT) - 1);
        tail &= !((1 << SHIFT) - 1);

        unsafe {
            // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks.
            while head != tail {
                let offset = (head >> SHIFT) % LAP;

                if offset < BLOCK_CAP {
                    // Drop the value in the slot.
                    let slot = (*block).slots.get_unchecked(offset);
                    (*slot.value.get()).assume_init_drop();
                } else {
                    // Deallocate the block and move to the next one.
                    let next = *(*block).next.get_mut();
                    drop(Box::from_raw(block));
                    block = next;
                }

                head = head.wrapping_add(1 << SHIFT);
            }

            // Deallocate the last remaining block.
            if !block.is_null() {
                drop(Box::from_raw(block));
            }
        }
    }
}
impl<T> fmt::Debug for SegQueue<T> {
    /// Formats an opaque placeholder; the queue's elements are not shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let placeholder = "SegQueue { .. }";
        f.pad(placeholder)
    }
}
impl<T> Default for SegQueue<T> {
fn default() -> SegQueue<T> {
SegQueue::new()
}
}
impl<T> IntoIterator for SegQueue<T> {
    type Item = T;

    type IntoIter = IntoIter<T>;

    // Consumes the queue, yielding its remaining elements in FIFO order.
    fn into_iter(self) -> Self::IntoIter {
        IntoIter { value: self }
    }
}
// A consuming iterator over the remaining elements of a `SegQueue`.
#[derive(Debug)]
pub struct IntoIter<T> {
    value: SegQueue<T>,
}
impl<T> Iterator for IntoIter<T> {
    type Item = T;

    // Reads the value at `head`, then advances the head index, freeing a
    // block once its last value has been consumed. No synchronization is
    // needed because `&mut self` gives exclusive access.
    fn next(&mut self) -> Option<Self::Item> {
        let value = &mut self.value;
        let head = *value.head.index.get_mut();
        let tail = *value.tail.index.get_mut();
        if head >> SHIFT == tail >> SHIFT {
            None
        } else {
            let block = *value.head.block.get_mut();
            let offset = (head >> SHIFT) % LAP;

            // SAFETY: We have mutable access to this, so we can read without
            // worrying about concurrency. Furthermore, we know this is
            // initialized because it is the value pointed at by `value.head`
            // and this is a non-empty queue.
            let item = unsafe {
                let slot = (*block).slots.get_unchecked(offset);
                slot.value.get().read().assume_init()
            };
            if offset + 1 == BLOCK_CAP {
                // Deallocate the block and move to the next one.
                // SAFETY: The block is initialized because we've been reading
                // from it this entire time. We can drop it b/c everything has
                // been read out of it, so nothing is pointing to it anymore.
                unsafe {
                    let next = *(*block).next.get_mut();
                    drop(Box::from_raw(block));
                    *value.head.block.get_mut() = next;
                }
                // The last value in a block is empty, so skip it
                *value.head.index.get_mut() = head.wrapping_add(2 << SHIFT);
                // Double-check that we're pointing to the first item in a block.
                debug_assert_eq!((*value.head.index.get_mut() >> SHIFT) % LAP, 0);
            } else {
                *value.head.index.get_mut() = head.wrapping_add(1 << SHIFT);
            }
            Some(item)
        }
    }
}

View file

@ -1,374 +0,0 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use crossbeam_queue::ArrayQueue;
use crossbeam_utils::thread::scope;
use rand::{thread_rng, Rng};
#[test]
fn smoke() {
    // Push/pop a couple of values through a capacity-1 queue.
    let q = ArrayQueue::new(1);

    for x in [7, 8] {
        q.push(x).unwrap();
        assert_eq!(q.pop(), Some(x));
    }
    assert!(q.pop().is_none());
}
#[test]
fn capacity() {
    // The reported capacity must match the requested one for a range of sizes.
    for cap in 1..10 {
        assert_eq!(ArrayQueue::<i32>::new(cap).capacity(), cap);
    }
}
#[test]
#[should_panic(expected = "capacity must be non-zero")]
fn zero_capacity() {
    // Constructing a queue with no capacity must panic inside `new`.
    drop(ArrayQueue::<i32>::new(0));
}
#[test]
fn len_empty_full() {
    let q = ArrayQueue::new(2);

    // Checks all three state queries against the expected element count.
    let expect = |len: usize| {
        assert_eq!(q.len(), len);
        assert_eq!(q.is_empty(), len == 0);
        assert_eq!(q.is_full(), len == q.capacity());
    };

    expect(0);
    q.push(()).unwrap();
    expect(1);
    q.push(()).unwrap();
    expect(2);
    q.pop().unwrap();
    expect(1);
}
#[test]
fn len() {
    // Much smaller workloads under Miri, which executes slowly.
    #[cfg(miri)]
    const COUNT: usize = 30;
    #[cfg(not(miri))]
    const COUNT: usize = 25_000;
    #[cfg(miri)]
    const CAP: usize = 40;
    #[cfg(not(miri))]
    const CAP: usize = 1000;
    const ITERS: usize = CAP / 20;

    let q = ArrayQueue::new(CAP);
    assert_eq!(q.len(), 0);

    // Single-threaded: `len` must track pushes and pops exactly.
    for _ in 0..CAP / 10 {
        for i in 0..ITERS {
            q.push(i).unwrap();
            assert_eq!(q.len(), i + 1);
        }

        for i in 0..ITERS {
            q.pop().unwrap();
            assert_eq!(q.len(), ITERS - i - 1);
        }
    }
    assert_eq!(q.len(), 0);

    // Fill to capacity, then drain completely.
    for i in 0..CAP {
        q.push(i).unwrap();
        assert_eq!(q.len(), i + 1);
    }

    for _ in 0..CAP {
        q.pop().unwrap();
    }
    assert_eq!(q.len(), 0);

    // Concurrently: `len` may lag, but must never exceed the capacity.
    scope(|scope| {
        scope.spawn(|_| {
            for i in 0..COUNT {
                loop {
                    if let Some(x) = q.pop() {
                        assert_eq!(x, i);
                        break;
                    }
                }
                let len = q.len();
                assert!(len <= CAP);
            }
        });

        scope.spawn(|_| {
            for i in 0..COUNT {
                while q.push(i).is_err() {}
                let len = q.len();
                assert!(len <= CAP);
            }
        });
    })
    .unwrap();
    assert_eq!(q.len(), 0);
}
#[test]
fn spsc() {
    #[cfg(miri)]
    const COUNT: usize = 50;
    #[cfg(not(miri))]
    const COUNT: usize = 100_000;

    let q = ArrayQueue::new(3);

    scope(|scope| {
        // Consumer: values must arrive in FIFO order.
        scope.spawn(|_| {
            for i in 0..COUNT {
                loop {
                    if let Some(x) = q.pop() {
                        assert_eq!(x, i);
                        break;
                    }
                }
            }
            assert!(q.pop().is_none());
        });

        // Producer: spin until each push succeeds.
        scope.spawn(|_| {
            for i in 0..COUNT {
                while q.push(i).is_err() {}
            }
        });
    })
    .unwrap();
}
#[test]
fn spsc_ring_buffer() {
    #[cfg(miri)]
    const COUNT: usize = 50;
    #[cfg(not(miri))]
    const COUNT: usize = 100_000;

    // `t` counts live producers; `v` counts how often each value was observed.
    let t = AtomicUsize::new(1);
    let q = ArrayQueue::<usize>::new(3);
    let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();

    scope(|scope| {
        // Consumer: drain until the producer is done and the queue is empty.
        scope.spawn(|_| loop {
            match t.load(Ordering::SeqCst) {
                0 if q.is_empty() => break,

                _ => {
                    while let Some(n) = q.pop() {
                        v[n].fetch_add(1, Ordering::SeqCst);
                    }
                }
            }
        });

        // Producer: force-push everything; displaced values are counted here.
        scope.spawn(|_| {
            for i in 0..COUNT {
                if let Some(n) = q.force_push(i) {
                    v[n].fetch_add(1, Ordering::SeqCst);
                }
            }

            t.fetch_sub(1, Ordering::SeqCst);
        });
    })
    .unwrap();

    // Every value must be observed exactly once (popped or displaced).
    for c in v {
        assert_eq!(c.load(Ordering::SeqCst), 1);
    }
}
#[test]
fn mpmc() {
#[cfg(miri)]
const COUNT: usize = 50;
#[cfg(not(miri))]
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let q = ArrayQueue::<usize>::new(3);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
let n = loop {
if let Some(x) = q.pop() {
break x;
}
};
v[n].fetch_add(1, Ordering::SeqCst);
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
while q.push(i).is_err() {}
}
});
}
})
.unwrap();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn mpmc_ring_buffer() {
#[cfg(miri)]
const COUNT: usize = 50;
#[cfg(not(miri))]
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let t = AtomicUsize::new(THREADS);
let q = ArrayQueue::<usize>::new(3);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| loop {
match t.load(Ordering::SeqCst) {
0 if q.is_empty() => break,
_ => {
while let Some(n) = q.pop() {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
if let Some(n) = q.force_push(i) {
v[n].fetch_add(1, Ordering::SeqCst);
}
}
t.fetch_sub(1, Ordering::SeqCst);
});
}
})
.unwrap();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn drops() {
let runs: usize = if cfg!(miri) { 3 } else { 100 };
let steps: usize = if cfg!(miri) { 50 } else { 10_000 };
let additional: usize = if cfg!(miri) { 10 } else { 50 };
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut rng = thread_rng();
for _ in 0..runs {
let steps = rng.gen_range(0..steps);
let additional = rng.gen_range(0..additional);
DROPS.store(0, Ordering::SeqCst);
let q = ArrayQueue::new(50);
scope(|scope| {
scope.spawn(|_| {
for _ in 0..steps {
while q.pop().is_none() {}
}
});
scope.spawn(|_| {
for _ in 0..steps {
while q.push(DropCounter).is_err() {
DROPS.fetch_sub(1, Ordering::SeqCst);
}
}
});
})
.unwrap();
for _ in 0..additional {
q.push(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(q);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
#[test]
fn linearizable() {
#[cfg(miri)]
const COUNT: usize = 100;
#[cfg(not(miri))]
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let q = ArrayQueue::new(THREADS);
scope(|scope| {
for _ in 0..THREADS / 2 {
scope.spawn(|_| {
for _ in 0..COUNT {
while q.push(0).is_err() {}
q.pop().unwrap();
}
});
scope.spawn(|_| {
for _ in 0..COUNT {
if q.force_push(0).is_none() {
q.pop().unwrap();
}
}
});
}
})
.unwrap();
}
#[test]
fn into_iter() {
let q = ArrayQueue::new(100);
for i in 0..100 {
q.push(i).unwrap();
}
for (i, j) in q.into_iter().enumerate() {
assert_eq!(i, j);
}
}

View file

@ -1,195 +0,0 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use crossbeam_queue::SegQueue;
use crossbeam_utils::thread::scope;
use rand::{thread_rng, Rng};
#[test]
fn smoke() {
let q = SegQueue::new();
q.push(7);
assert_eq!(q.pop(), Some(7));
q.push(8);
assert_eq!(q.pop(), Some(8));
assert!(q.pop().is_none());
}
#[test]
fn len_empty_full() {
let q = SegQueue::new();
assert_eq!(q.len(), 0);
assert!(q.is_empty());
q.push(());
assert_eq!(q.len(), 1);
assert!(!q.is_empty());
q.pop().unwrap();
assert_eq!(q.len(), 0);
assert!(q.is_empty());
}
#[test]
fn len() {
let q = SegQueue::new();
assert_eq!(q.len(), 0);
for i in 0..50 {
q.push(i);
assert_eq!(q.len(), i + 1);
}
for i in 0..50 {
q.pop().unwrap();
assert_eq!(q.len(), 50 - i - 1);
}
assert_eq!(q.len(), 0);
}
#[test]
fn spsc() {
#[cfg(miri)]
const COUNT: usize = 100;
#[cfg(not(miri))]
const COUNT: usize = 100_000;
let q = SegQueue::new();
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
loop {
if let Some(x) = q.pop() {
assert_eq!(x, i);
break;
}
}
}
assert!(q.pop().is_none());
});
scope.spawn(|_| {
for i in 0..COUNT {
q.push(i);
}
});
})
.unwrap();
}
#[test]
fn mpmc() {
#[cfg(miri)]
const COUNT: usize = 50;
#[cfg(not(miri))]
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let q = SegQueue::<usize>::new();
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
let n = loop {
if let Some(x) = q.pop() {
break x;
}
};
v[n].fetch_add(1, Ordering::SeqCst);
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
q.push(i);
}
});
}
})
.unwrap();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn drops() {
let runs: usize = if cfg!(miri) { 5 } else { 100 };
let steps: usize = if cfg!(miri) { 50 } else { 10_000 };
let additional: usize = if cfg!(miri) { 100 } else { 1_000 };
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut rng = thread_rng();
for _ in 0..runs {
let steps = rng.gen_range(0..steps);
let additional = rng.gen_range(0..additional);
DROPS.store(0, Ordering::SeqCst);
let q = SegQueue::new();
scope(|scope| {
scope.spawn(|_| {
for _ in 0..steps {
while q.pop().is_none() {}
}
});
scope.spawn(|_| {
for _ in 0..steps {
q.push(DropCounter);
}
});
})
.unwrap();
for _ in 0..additional {
q.push(DropCounter);
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(q);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
#[test]
fn into_iter() {
let q = SegQueue::new();
for i in 0..100 {
q.push(i);
}
for (i, j) in q.into_iter().enumerate() {
assert_eq!(i, j);
}
}
#[test]
fn into_iter_drop() {
let q = SegQueue::new();
for i in 0..100 {
q.push(i);
}
for (i, j) in q.into_iter().enumerate().take(50) {
assert_eq!(i, j);
}
}

View file

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"87ba1b0829aea990ab3d7f2a30899c9d5e1067fba07c2abefae1fd004f2a5982","Cargo.toml":"9411d90fcc5e6badf13c9b3c133f2330b2b900dc4c851cfea36b8035cad7ca40","LICENSE-APACHE":"6f712474a3e3be1386d2d0c29449850ea788da64d35cff0fc8799acf741e9ecd","LICENSE-MIT":"5734ed989dfca1f625b40281ee9f4530f91b2411ec01cb748223e7eb87e201ab","README.md":"dbe15407a26bdae7ecdcb376b8c62170104156f12c3c1ed8b121e9d6e763d63b","build-common.rs":"502cb7494549bed6fa10ac7bea36e880eeb60290dc69b679ac5c92b376469562","no_atomic.rs":"31a8276afd38e39987a169eeb02e9bed32670de5ca36d7eb74aab7e506cf9dc4","src/lib.rs":"27e0ffe5571c303464e8e30b6e2d486ccb342cb4293d49f282666e1f63fc39cc","tests/subcrates.rs":"bb9715e661d65bf9e61d41ad838f09dc531d624e705ab8650e1ee549a1e1feaf"},"package":"1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8"}

View file

@ -1,105 +0,0 @@
# Version 0.8.4
- Remove dependency on `cfg-if`. (#1072)
# Version 0.8.3
- Bump the minimum supported Rust version to 1.61. (#1037)
# Version 0.8.2
- Bump the minimum supported Rust version to 1.38. (#877)
# Version 0.8.1
- Support targets that do not have atomic CAS on stable Rust (#698)
# Version 0.8.0
- Bump the minimum supported Rust version to 1.36.
- Bump `crossbeam-channel` to `0.5`.
- Bump `crossbeam-deque` to `0.8`.
- Bump `crossbeam-epoch` to `0.9`.
- Bump `crossbeam-queue` to `0.3`.
- Bump `crossbeam-utils` to `0.8`.
# Version 0.7.3
- Fix breakage with nightly feature due to rust-lang/rust#65214.
- Bump `crossbeam-channel` to `0.4`.
- Bump `crossbeam-epoch` to `0.8`.
- Bump `crossbeam-queue` to `0.2`.
- Bump `crossbeam-utils` to `0.7`.
# Version 0.7.2
- Bump `crossbeam-channel` to `0.3.9`.
- Bump `crossbeam-epoch` to `0.7.2`.
- Bump `crossbeam-utils` to `0.6.6`.
# Version 0.7.1
- Bump `crossbeam-utils` to `0.6.5`.
# Version 0.7.0
- Remove `ArcCell`, `MsQueue`, and `TreiberStack`.
- Change the interface of `ShardedLock` to match `RwLock`.
- Add `SegQueue::len()`.
- Rename `SegQueue::try_pop()` to `SegQueue::pop()`.
- Change the return type of `SegQueue::pop()` to `Result`.
- Introduce `ArrayQueue`.
- Update dependencies.
# Version 0.6.0
- Update dependencies.
# Version 0.5.0
- Update `crossbeam-channel` to 0.3.
- Update `crossbeam-utils` to 0.6.
- Add `AtomicCell`, `SharedLock`, and `WaitGroup`.
# Version 0.4.1
- Fix a double-free bug in `MsQueue` and `SegQueue`.
# Version 0.4
- Switch to the new implementation of epoch-based reclamation in
[`crossbeam-epoch`](https://github.com/crossbeam-rs/crossbeam-epoch), fixing numerous bugs in the
old implementation. Its API is changed in a backward-incompatible way.
- Switch to the new implementation of `CachePadded` and scoped thread in
[`crossbeam-utils`](https://github.com/crossbeam-rs/crossbeam-utils). The scoped thread API is
changed in a backward-incompatible way.
- Switch to the new implementation of Chase-Lev deque in
[`crossbeam-deque`](https://github.com/crossbeam-rs/crossbeam-deque). Its API is changed in a
backward-incompatible way.
- Export channel implemented in
[`crossbeam-channel`](https://github.com/crossbeam-rs/crossbeam-channel).
- Remove `AtomicOption`.
- Implement `Default` and `From` traits.
# Version 0.3
- Introduced `ScopedThreadBuilder` with the ability to name threads and set stack size
- `Worker` methods in the Chase-Lev deque don't require mutable access anymore
- Fixed a bug when unblocking `pop()` in `MsQueue`
- Implemented `Drop` for `MsQueue`, `SegQueue`, and `TreiberStack`
- Implemented `Default` for `TreiberStack`
- Added `is_empty` to `SegQueue`
- Renamed `mem::epoch` to `epoch`
- Other bug fixes
# Version 0.2
- Changed existing non-blocking `pop` methods to `try_pop`
- Added blocking `pop` support to Michael-Scott queue
- Added Chase-Lev work-stealing deque
# Version 0.1
- Added [epoch-based memory management](http://aturon.github.io/blog/2015/08/27/epoch/)
- Added Michael-Scott queue
- Added Segmented array queue

View file

@ -1,86 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.61"
name = "crossbeam"
version = "0.8.4"
exclude = [
"/.*",
"/ci",
"/tools",
]
description = "Tools for concurrent programming"
homepage = "https://github.com/crossbeam-rs/crossbeam"
readme = "README.md"
keywords = [
"atomic",
"garbage",
"non-blocking",
"lock-free",
"rcu",
]
categories = [
"concurrency",
"memory-management",
"data-structures",
"no-std",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/crossbeam-rs/crossbeam"
[dependencies.crossbeam-channel]
version = "0.5.10"
optional = true
default-features = false
[dependencies.crossbeam-deque]
version = "0.8.4"
optional = true
default-features = false
[dependencies.crossbeam-epoch]
version = "0.9.17"
optional = true
default-features = false
[dependencies.crossbeam-queue]
version = "0.3.10"
optional = true
default-features = false
[dependencies.crossbeam-utils]
version = "0.8.18"
default-features = false
[dev-dependencies.rand]
version = "0.8"
[features]
alloc = [
"crossbeam-epoch/alloc",
"crossbeam-queue/alloc",
]
default = ["std"]
nightly = [
"crossbeam-epoch/nightly",
"crossbeam-utils/nightly",
"crossbeam-queue/nightly",
]
std = [
"alloc",
"crossbeam-channel/std",
"crossbeam-deque/std",
"crossbeam-epoch/std",
"crossbeam-queue/std",
"crossbeam-utils/std",
]

View file

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 The Crossbeam Project Developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,27 +0,0 @@
The MIT License (MIT)
Copyright (c) 2019 The Crossbeam Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View file

@ -1,158 +0,0 @@
# Crossbeam
[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)](
https://github.com/crossbeam-rs/crossbeam/actions)
[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)](
https://github.com/crossbeam-rs/crossbeam#license)
[![Cargo](https://img.shields.io/crates/v/crossbeam.svg)](
https://crates.io/crates/crossbeam)
[![Documentation](https://docs.rs/crossbeam/badge.svg)](
https://docs.rs/crossbeam)
[![Rust 1.61+](https://img.shields.io/badge/rust-1.61+-lightgray.svg)](
https://www.rust-lang.org)
[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ)
This crate provides a set of tools for concurrent programming:
#### Atomics
* [`AtomicCell`], a thread-safe mutable memory location.<sup>(no_std)</sup>
* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.<sup>(no_std)</sup>
#### Data structures
* [`deque`], work-stealing deques for building task schedulers.
* [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction.<sup>(alloc)</sup>
* [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand.<sup>(alloc)</sup>
#### Memory management
* [`epoch`], an epoch-based garbage collector.<sup>(alloc)</sup>
#### Thread synchronization
* [`channel`], multi-producer multi-consumer channels for message passing.
* [`Parker`], a thread parking primitive.
* [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads.
* [`WaitGroup`], for synchronizing the beginning or end of some computation.
#### Utilities
* [`Backoff`], for exponential backoff in spin loops.<sup>(no_std)</sup>
* [`CachePadded`], for padding and aligning a value to the length of a cache line.<sup>(no_std)</sup>
* [`scope`], for spawning threads that borrow local variables from the stack.
*Features marked with <sup>(no_std)</sup> can be used in `no_std` environments.*<br/>
*Features marked with <sup>(alloc)</sup> can be used in `no_std` environments, but only if `alloc`
feature is enabled.*
[`AtomicCell`]: https://docs.rs/crossbeam/*/crossbeam/atomic/struct.AtomicCell.html
[`AtomicConsume`]: https://docs.rs/crossbeam/*/crossbeam/atomic/trait.AtomicConsume.html
[`deque`]: https://docs.rs/crossbeam/*/crossbeam/deque/index.html
[`ArrayQueue`]: https://docs.rs/crossbeam/*/crossbeam/queue/struct.ArrayQueue.html
[`SegQueue`]: https://docs.rs/crossbeam/*/crossbeam/queue/struct.SegQueue.html
[`channel`]: https://docs.rs/crossbeam/*/crossbeam/channel/index.html
[`Parker`]: https://docs.rs/crossbeam/*/crossbeam/sync/struct.Parker.html
[`ShardedLock`]: https://docs.rs/crossbeam/*/crossbeam/sync/struct.ShardedLock.html
[`WaitGroup`]: https://docs.rs/crossbeam/*/crossbeam/sync/struct.WaitGroup.html
[`epoch`]: https://docs.rs/crossbeam/*/crossbeam/epoch/index.html
[`Backoff`]: https://docs.rs/crossbeam/*/crossbeam/utils/struct.Backoff.html
[`CachePadded`]: https://docs.rs/crossbeam/*/crossbeam/utils/struct.CachePadded.html
[`scope`]: https://docs.rs/crossbeam/*/crossbeam/fn.scope.html
## Crates
The main `crossbeam` crate just [re-exports](src/lib.rs) tools from
smaller subcrates:
* [`crossbeam-channel`](crossbeam-channel)
provides multi-producer multi-consumer channels for message passing.
* [`crossbeam-deque`](crossbeam-deque)
provides work-stealing deques, which are primarily intended for building task schedulers.
* [`crossbeam-epoch`](crossbeam-epoch)
provides epoch-based garbage collection for building concurrent data structures.
* [`crossbeam-queue`](crossbeam-queue)
provides concurrent queues that can be shared among threads.
* [`crossbeam-utils`](crossbeam-utils)
provides atomics, synchronization primitives, scoped threads, and other utilities.
There is one more experimental subcrate that is not yet included in `crossbeam`:
* [`crossbeam-skiplist`](crossbeam-skiplist)
provides concurrent maps and sets based on lock-free skip lists.
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam = "0.8"
```
## Compatibility
Crossbeam supports stable Rust releases going back at least six months,
and every time the minimum supported Rust version is increased, a new minor
version is released. Currently, the minimum supported Rust version is 1.61.
## Contributing
Crossbeam welcomes contribution from everyone in the form of suggestions, bug reports,
pull requests, and feedback. 💛
If you need ideas for contribution, there are several ways to get started:
* Found a bug or have a feature request?
[Submit an issue](https://github.com/crossbeam-rs/crossbeam/issues/new)!
* Issues and PRs labeled with
[feedback wanted](https://github.com/crossbeam-rs/crossbeam/issues?utf8=%E2%9C%93&q=is%3Aopen+sort%3Aupdated-desc+label%3A%22feedback+wanted%22+)
need feedback from users and contributors.
* Issues labeled with
[good first issue](https://github.com/crossbeam-rs/crossbeam/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22good+first+issue%22)
are relatively easy starter issues.
#### RFCs
We also have the [RFCs](https://github.com/crossbeam-rs/rfcs) repository for more
high-level discussion, which is the place where we brainstorm ideas and propose
substantial changes to Crossbeam.
You are welcome to participate in any open
[issues](https://github.com/crossbeam-rs/rfcs/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc)
or
[pull requests](https://github.com/crossbeam-rs/rfcs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc).
#### Learning resources
If you'd like to learn more about concurrency and non-blocking data structures, there's a
list of learning resources in our [wiki](https://github.com/crossbeam-rs/rfcs/wiki),
which includes relevant blog posts, papers, videos, and other similar projects.
Another good place to visit is [merged RFCs](https://github.com/crossbeam-rs/rfcs/tree/master/text).
They contain elaborate descriptions and rationale for features we've introduced to
Crossbeam, but keep in mind that some of the written information is now out of date.
#### Conduct
The Crossbeam project adheres to the
[Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct).
This describes the minimum behavior expected from all contributors.
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
Some Crossbeam subcrates have additional licensing notices.
Take a look at other readme files in this repository for more information.
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

View file

@ -1,13 +0,0 @@
// The target triplets have the form of 'arch-vendor-system'.
//
// When building for Linux (e.g. the 'system' part is
// 'linux-something'), replace the vendor with 'unknown'
// so that mapping to rust standard targets happens correctly.
fn convert_custom_linux_target(target: String) -> String {
let mut parts: Vec<&str> = target.split('-').collect();
let system = parts.get(2);
if system == Some(&"linux") {
parts[1] = "unknown";
};
parts.join("-")
}

View file

@ -1,12 +0,0 @@
// This file is @generated by no_atomic.sh.
// It is not intended for manual editing.
const NO_ATOMIC: &[&str] = &[
"bpfeb-unknown-none",
"bpfel-unknown-none",
"mipsel-sony-psx",
"msp430-none-elf",
"riscv32i-unknown-none-elf",
"riscv32im-unknown-none-elf",
"riscv32imc-unknown-none-elf",
];

View file

@ -1,81 +0,0 @@
//! Tools for concurrent programming.
//!
//! ## Atomics
//!
//! * [`AtomicCell`], a thread-safe mutable memory location.
//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.
//!
//! ## Data structures
//!
//! * [`deque`], work-stealing deques for building task schedulers.
//! * [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction.
//! * [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand.
//!
//! ## Memory management
//!
//! * [`epoch`], an epoch-based garbage collector.
//!
//! ## Thread synchronization
//!
//! * [`channel`], multi-producer multi-consumer channels for message passing.
//! * [`Parker`], a thread parking primitive.
//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads.
//! * [`WaitGroup`], for synchronizing the beginning or end of some computation.
//!
//! ## Utilities
//!
//! * [`Backoff`], for exponential backoff in spin loops.
//! * [`CachePadded`], for padding and aligning a value to the length of a cache line.
//! * [`scope`], for spawning threads that borrow local variables from the stack.
//!
//! [`AtomicCell`]: atomic::AtomicCell
//! [`AtomicConsume`]: atomic::AtomicConsume
//! [`ArrayQueue`]: queue::ArrayQueue
//! [`SegQueue`]: queue::SegQueue
//! [`Parker`]: sync::Parker
//! [`ShardedLock`]: sync::ShardedLock
//! [`WaitGroup`]: sync::WaitGroup
//! [`Backoff`]: utils::Backoff
//! [`CachePadded`]: utils::CachePadded

// Doctest configuration: deny warnings in examples, but keep trivial
// example code (unused variables, dead code) compiling.
#![doc(test(
    no_crate_inject,
    attr(
        deny(warnings, rust_2018_idioms),
        allow(dead_code, unused_assignments, unused_variables)
    )
))]
#![warn(
    missing_docs,
    missing_debug_implementations,
    rust_2018_idioms,
    unreachable_pub
)]
// Without the "std" feature this crate builds as no_std.
#![cfg_attr(not(feature = "std"), no_std)]

// Always available: primitive atomic utilities from crossbeam-utils.
pub use crossbeam_utils::atomic;

pub mod utils {
    //! Miscellaneous utilities.
    //!
    //! * [`Backoff`], for exponential backoff in spin loops.
    //! * [`CachePadded`], for padding and aligning a value to the length of a cache line.

    pub use crossbeam_utils::Backoff;
    pub use crossbeam_utils::CachePadded;
}

// Re-exports requiring only an allocator (no full std): the epoch-based
// GC and the lock-free queues.
#[cfg(feature = "alloc")]
#[doc(inline)]
pub use {crossbeam_epoch as epoch, crossbeam_queue as queue};

// Re-exports requiring std: channels (plus the `select!` macro),
// work-stealing deques, and the sync primitives.
#[cfg(feature = "std")]
#[doc(inline)]
pub use {
    crossbeam_channel as channel, crossbeam_channel::select, crossbeam_deque as deque,
    crossbeam_utils::sync,
};

// Scoped threads are additionally gated out under the `crossbeam_loom`
// cfg flag — NOTE(review): presumably because loom-based model testing
// does not support them; confirm against crossbeam-utils.
#[cfg(feature = "std")]
#[cfg(not(crossbeam_loom))]
pub use crossbeam_utils::thread::{self, scope};

View file

@ -1,48 +0,0 @@
//! Makes sure subcrates are properly re-exported.

use crossbeam::select;

/// The `channel` module and the `select!` macro are usable from the facade.
#[test]
fn channel() {
    let (tx, rx) = crossbeam::channel::bounded(1);
    select! {
        send(tx, 0) -> res => res.unwrap(),
        recv(rx) -> res => assert!(res.is_ok()),
    }
}

/// The `deque` module is usable from the facade.
#[test]
fn deque() {
    let worker = crossbeam::deque::Worker::new_fifo();
    worker.push(1);
    let _ = worker.pop();
}

/// The `epoch` module is usable from the facade.
#[test]
#[cfg_attr(miri, ignore)] // Miri ICE: https://github.com/crossbeam-rs/crossbeam/pull/870#issuecomment-1189209073
fn epoch() {
    crossbeam::epoch::pin();
}

/// The `queue` module is usable from the facade.
#[test]
fn queue() {
    let queue = crossbeam::queue::ArrayQueue::new(10);
    let _ = queue.push(1);
    let _ = queue.pop();
}

/// The `utils` module and both scoped-thread entry points are usable.
#[test]
fn utils() {
    crossbeam::utils::CachePadded::new(7);

    let direct = crossbeam::scope(|scope| {
        scope.spawn(|_| ());
    });
    direct.unwrap();

    let via_thread = crossbeam::thread::scope(|scope| {
        scope.spawn(|_| ());
    });
    via_thread.unwrap();
}

View file

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"b980ff14aa7e728c69b6966f68ade64cd6259b89d6b61a099bfd0dd47960990b","Cargo.lock":"4e4e145371cef95fe347e7b14731fbf8f8aef366f45eb45f37760892a966e35b","Cargo.toml":"ad05a3833cfb3693e44c8341e23a85bd88d9578833193341b1b016cdbf99b8fd","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"d6083c077bb087914386be267b52b98bca521f9af5b40b7fd82960d3ae2d2e3a","build.rs":"fbd0d04cc64884da6b65ad460084ad49e56f8a14fba24a256e161cb18b15441c","examples/custom_handler.rs":"33fa83c1ac4a6af8511796cc3b4818d4d4a50fa5eac5b9533a91acd695337169","examples/eyre-usage.rs":"3380d5176d433209eadeb355c2884fd0d46dc6a636b7402fab8fae17f79fa6c0","src/backtrace.rs":"02e509dd794ee2814b1342879414373935bcc33b433e45a58193297e52f95db7","src/chain.rs":"342161434eaa3db018541a24e01531135b471dfac44f7d25c84875c4dc5692d1","src/context.rs":"97f5c3fbe0679db8203ba5983817124df0d888d39e4fc8a55f33f568b4c536e0","src/error.rs":"b2395cb008713ec5112d332c81d0edc293a71bb992dc0d1a42db67ec5301dc10","src/error/pyo3_compat.rs":"6a3b48211b5496944aac8e058cbca85d37f379b3fc18b57c5e00ce56832d47bc","src/fmt.rs":"101f0fc55eba79900dafe04874f2b8f144d5da884b2087b77cda9fc1588d4d8c","src/kind.rs":"12aa656231f87f33367ac1db011dee70f87adff119305cf9d40faf3140234436","src/lib.rs":"32812e58622ad0d0fcc862163fda9c0f13497bc38df5cdd214f04d78a6d069e6","src/macros.rs":"22f30ae6fa6c130db4fa15913caf690cb33429e69cbf9f6371db586f1e0a5002","src/option.rs":"06271311605414d5f850efdf61a8c6976730f90cb03dc0712faa53f051a34509","src/ptr.rs":"81c21f61e9063db1eea3152ced7f01dd02d78ec64d09bf7f8ea0936e1d9772ad","src/wrapper.rs":"1d92365de5b679cc8bd328895724e6f7283e48ebf826bdbfac594813b2f96243","tests/common/mod.rs":"8094c30a551b8d4b04b474e5d4094bfd56f714fd51a8e2f4636eb823c5d08b1c","tests/compiletest.rs":"022a8e400ef813d7ea1875b944549cee5125f6a995dc33e93b48cba3e1b57bd1","tests/drop/mod.rs":"bd6d59b699a2901de2bda3c40c899af4ee82e4e
98617516ea41c327b99af51e1","tests/test_autotrait.rs":"18b0c73026e9bbbc5272e8ed832ccb2522a606b64d50c80316495013f6acc808","tests/test_boxed.rs":"badf6e661aa3076ca644f6c0275991c8d26c5d3684bb2cb260ca2f58482594e8","tests/test_chain.rs":"8c7a75e38d241e9931a3530b30fab80ef87488daeecf87c84f72069d34650188","tests/test_context.rs":"816bdfa55f2fda0abde014817e46227a2da3effa0f697cfa5d2ca218650b946e","tests/test_context_access.rs":"dc490cfc031ac3907314a578c9bb9dfa51be95b041a62f771e39b79d66b763d3","tests/test_convert.rs":"300644b4ebf0cb94b542f6e306779199bc7fce7cc7bec956a1156767d847ae44","tests/test_downcast.rs":"4172648f5f48d489fe5b2e0e962dc2c35624eef5f907192db63f11493a396008","tests/test_fmt.rs":"a1ed4b79bea5f04006868d5145980ea6936d2eb4feee4b87e2c675d4f0b465ff","tests/test_location.rs":"79725e31e961df162b6e1f1194e7a0e5f45740bbeb7700293c2880360050ae28","tests/test_macros.rs":"bd1577f801a24abfb2870de9af9a9224a49025cbc8227a3b237bc73669592ba7","tests/test_no_install.rs":"9b4d695d9c699d5c39c7c4fd178b30fe472e96efbf2e8e4448ce72d6f2b3101b","tests/test_option.rs":"ae49e0b486f35bcd485765b1f6ebd0e02173b574b11c279c5597caf2f84f8c71","tests/test_pyo3.rs":"a1006f46d317e9e7a0c7c57f3502b9af594b62cb717269fee220f175d9ba5f17","tests/test_repr.rs":"a105eba1500a0bd7e36895bf03d43e2c9dbb6c573a0051c80f7ea7bdac103f78","tests/test_source.rs":"7d3cc674b802e46f84230db2e72b9a66b3bff39ae8d0246ab31607a8e6c01f27","tests/test_toolchain.rs":"96f9aacc8d9fe33d6e1d90a8f329d54d7ad5880348c64a87894ca916321c8c10"},"package":"7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec"}

View file

@ -1,91 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
<!-- next-header -->
## [Unreleased] - ReleaseDate
## [0.6.12] - 2024-01-31
### Fixed
- Unsound cast to invalid type during Report downcast [by ten3roberts](https://github.com/eyre-rs/eyre/pull/143)
## [0.6.11] - 2023-12-13
### Fixed
- stale references to `Error` in docstrings [by birkenfeld](https://github.com/eyre-rs/eyre/pull/87)
### Added
- one-argument ensure!($expr) [by sharnoff](https://github.com/eyre-rs/eyre/pull/86)
- documentation on the performance characteristics of `wrap_err` vs `wrap_err_with` [by akshayknarayan](https://github.com/eyre-rs/eyre/pull/93)
- tl;dr: `wrap_err_with` is faster unless the constructed error object already exists
- ~~automated conversion to external errors for ensure! and bail! [by j-baker](https://github.com/eyre-rs/eyre/pull/95)~~ breaking change: shelved for next major release
- eyre::Ok for generating eyre::Ok() without fully specifying the type [by kylewlacy](https://github.com/eyre-rs/eyre/pull/91)
- `OptionExt::ok_or_eyre` for yielding static `Report`s from `None` [by LeoniePhiline](https://github.com/eyre-rs/eyre/pull/125)
### New Contributors
- @sharnoff made their first contribution in https://github.com/eyre-rs/eyre/pull/86
- @akshayknarayan made their first contribution in https://github.com/eyre-rs/eyre/pull/93
- @j-baker made their first contribution in https://github.com/eyre-rs/eyre/pull/95
- @kylewlacy made their first contribution in https://github.com/eyre-rs/eyre/pull/91
- @LeoniePhiline made their first contribution in https://github.com/eyre-rs/eyre/pull/129
~~## [0.6.10] - 2023-12-07~~ Yanked
## [0.6.9] - 2023-11-17
### Fixed
- stacked borrows when dropping [by TimDiekmann](https://github.com/eyre-rs/eyre/pull/81)
- miri validation errors through now stricter provenance [by ten3roberts](https://github.com/eyre-rs/eyre/pull/103)
- documentation on no_std support [by thenorili](https://github.com/eyre-rs/eyre/pull/111)
### Added
- monorepo for eyre-related crates [by pksunkara](https://github.com/eyre-rs/eyre/pull/104), [[2]](https://github.com/eyre-rs/eyre/pull/105)[[3]](https://github.com/eyre-rs/eyre/pull/107)
- CONTRIBUTING.md [by yaahc](https://github.com/eyre-rs/eyre/pull/99)
## [0.6.8] - 2022-04-04
### Added
- `#[must_use]` to `Report`
- `must-install` feature to help reduce binary sizes when using a custom `EyreHandler`
## [0.6.7] - 2022-02-24
### Fixed
- missing track_caller annotation to new format arg capture constructor
## [0.6.6] - 2022-01-19
### Added
- support for format arguments capture on 1.58 and later
## [0.6.5] - 2021-01-05
### Added
- optional support for converting into `pyo3` exceptions
## [0.6.4] - 2021-01-04
### Fixed
- missing track_caller annotations to `wrap_err` related trait methods
## [0.6.3] - 2020-11-10
### Fixed
- missing track_caller annotation to autoref specialization functions
## [0.6.2] - 2020-10-27
### Fixed
- missing track_caller annotation to new_adhoc function
## [0.6.1] - 2020-09-28
### Added
- support for track_caller on rust versions where it is available
<!-- next-url -->
[Unreleased]: https://github.com/eyre-rs/eyre/compare/v0.6.12...HEAD
[0.6.12]: https://github.com/eyre-rs/eyre/compare/v0.6.11...v0.6.12
[0.6.11]: https://github.com/eyre-rs/eyre/compare/v0.6.9...v0.6.11
[0.6.9]: https://github.com/eyre-rs/eyre/compare/v0.6.8...v0.6.9
[0.6.8]: https://github.com/eyre-rs/eyre/compare/v0.6.7...v0.6.8
[0.6.7]: https://github.com/eyre-rs/eyre/compare/v0.6.6...v0.6.7
[0.6.6]: https://github.com/eyre-rs/eyre/compare/v0.6.5...v0.6.6
[0.6.5]: https://github.com/eyre-rs/eyre/compare/v0.6.4...v0.6.5
[0.6.4]: https://github.com/eyre-rs/eyre/compare/v0.6.3...v0.6.4
[0.6.3]: https://github.com/eyre-rs/eyre/compare/v0.6.2...v0.6.3
[0.6.2]: https://github.com/eyre-rs/eyre/compare/v0.6.1...v0.6.2
[0.6.1]: https://github.com/eyre-rs/eyre/releases/tag/v0.6.1

504
third-party/vendor/eyre/Cargo.lock generated vendored
View file

@ -1,504 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "addr2line"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
"gimli",
]
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "anyhow"
version = "1.0.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "backtrace"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
dependencies = [
"addr2line",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
[[package]]
name = "basic-toml"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f2139706359229bfa8f19142ac1155b4b80beafb7a60471ac5dd109d4a19778"
dependencies = [
"serde",
]
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "cc"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"libc",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "dissimilar"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632"
[[package]]
name = "eyre"
version = "0.6.12"
dependencies = [
"anyhow",
"backtrace",
"futures",
"indenter",
"once_cell",
"pyo3",
"rustversion",
"syn",
"thiserror",
"trybuild",
]
[[package]]
name = "futures"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c"
[[package]]
name = "futures-io"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa"
[[package]]
name = "futures-sink"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817"
[[package]]
name = "futures-task"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2"
[[package]]
name = "futures-util"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104"
dependencies = [
"futures-core",
"futures-sink",
"futures-task",
"pin-project-lite",
"pin-utils",
]
[[package]]
name = "gimli"
version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "glob"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "indenter"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683"
[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
"cfg-if",
]
[[package]]
name = "itoa"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
[[package]]
name = "libc"
version = "0.2.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c"
[[package]]
name = "lock_api"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "memchr"
version = "2.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
[[package]]
name = "memoffset"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
dependencies = [
"autocfg",
]
[[package]]
name = "miniz_oxide"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
dependencies = [
"adler",
]
[[package]]
name = "object"
version = "0.32.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
"instant",
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
"cfg-if",
"instant",
"libc",
"redox_syscall",
"smallvec",
"winapi",
]
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "proc-macro2"
version = "1.0.70"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04e8453b658fe480c3e70c8ed4e3d3ec33eb74988bd186561b0cc66b85c3bc4b"
dependencies = [
"cfg-if",
"libc",
"memoffset",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
]
[[package]]
name = "pyo3-build-config"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a96fe70b176a89cff78f2fa7b3c930081e163d5379b4dcdf993e3ae29ca662e5"
dependencies = [
"once_cell",
"target-lexicon",
]
[[package]]
name = "pyo3-ffi"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "214929900fd25e6604661ed9cf349727c8920d47deff196c4e28165a6ef2a96b"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "quote"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
dependencies = [
"proc-macro2",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags",
]
[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustversion"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
[[package]]
name = "ryu"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.108"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "smallvec"
version = "1.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970"
[[package]]
name = "syn"
version = "2.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "target-lexicon"
version = "0.12.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c39fd04924ca3a864207c66fc2cd7d22d7c016007f9ce846cbb9326331930a"
[[package]]
name = "termcolor"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449"
dependencies = [
"winapi-util",
]
[[package]]
name = "thiserror"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "trybuild"
version = "1.0.85"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "196a58260a906cedb9bf6d8034b6379d0c11f552416960452f267402ceeddff1"
dependencies = [
"basic-toml",
"dissimilar",
"glob",
"once_cell",
"serde",
"serde_derive",
"serde_json",
"termcolor",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

View file

@ -1,84 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
rust-version = "1.65.0"
name = "eyre"
version = "0.6.12"
authors = [
"David Tolnay <dtolnay@gmail.com>",
"Jane Lusby <jlusby42@gmail.com>",
]
description = "Flexible concrete Error Reporting type built on std::error::Error with customizable Reports"
documentation = "https://docs.rs/eyre"
readme = "README.md"
categories = ["rust-patterns"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/eyre-rs/eyre"
[package.metadata.docs.rs]
rustdoc-args = [
"--cfg",
"doc_cfg",
]
targets = ["x86_64-unknown-linux-gnu"]
[package.metadata.workspaces]
independent = true
[dependencies.indenter]
version = "0.3.0"
[dependencies.once_cell]
version = "1.18.0"
[dependencies.pyo3]
version = "0.20"
optional = true
default-features = false
[dev-dependencies.anyhow]
version = "1.0.28"
[dev-dependencies.backtrace]
version = "0.3.46"
[dev-dependencies.futures]
version = "0.3"
default-features = false
[dev-dependencies.pyo3]
version = "0.20"
features = ["auto-initialize"]
default-features = false
[dev-dependencies.rustversion]
version = "1.0"
[dev-dependencies.syn]
version = "2.0"
features = ["full"]
[dev-dependencies.thiserror]
version = "1.0"
[dev-dependencies.trybuild]
version = "1.0.19"
features = ["diff"]
[features]
auto-install = []
default = [
"auto-install",
"track-caller",
]
track-caller = []

View file

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

Some files were not shown because too many files have changed in this diff Show more