diff --git a/components/filecoin/rs/BUILD.gn b/components/filecoin/rs/BUILD.gn index 09c5028b15f6..e13e0deab388 100644 --- a/components/filecoin/rs/BUILD.gn +++ b/components/filecoin/rs/BUILD.gn @@ -19,12 +19,12 @@ rust_static_library("rust_lib") { deps = [ "//brave/third_party/rust/blake2b_simd/v1:lib", "//brave/third_party/rust/bls_signatures/v0_12:lib", - "//brave/third_party/rust/cid/v0_10:lib", + "//brave/third_party/rust/cid/v0_11:lib", "//brave/third_party/rust/forest_bigint/v0_1:lib", - "//brave/third_party/rust/fvm_ipld_encoding/v0_4:lib", - "//brave/third_party/rust/fvm_shared/v3:lib", + "//brave/third_party/rust/fvm_ipld_encoding/v0_5:lib", + "//brave/third_party/rust/fvm_shared/v4:lib", "//brave/third_party/rust/libsecp256k1/v0_7:lib", - "//brave/third_party/rust/multihash/v0_18:lib", + "//brave/third_party/rust/multihash_codetable/v0_1:lib", "//brave/third_party/rust/thiserror/v1:lib", "//third_party/rust/base64/v0_13:lib", "//third_party/rust/serde/v1:lib", diff --git a/components/filecoin/rs/Cargo.toml b/components/filecoin/rs/Cargo.toml index 7237affaa2c8..f76ec5680270 100644 --- a/components/filecoin/rs/Cargo.toml +++ b/components/filecoin/rs/Cargo.toml @@ -9,16 +9,16 @@ license = "MPL-2.0" base64 = "0.13.0" bls-signatures = { version = "0.12", default-features = false, features = ["pairing"] } cxx = "1" -fvm_shared = { version = "3.10.0" } +fvm_shared = { version = "4.5.1" } libsecp256k1 = "0.7" num_bigint_chainsafe = { package = "forest_bigint", version = "0.1.2"} serde = { version = "^1.0.117", features = ["derive"] } serde_json = "^1.0.59" thiserror = "^1.0.30" blake2b_simd = "1" -cid = { version = "0.10", default-features = false } -multihash = { version = "0.18.0", default-features = false, features = [ "multihash-impl", "blake2b"] } -fvm_ipld_encoding = "0.4" +cid = { version = "^0.11.1", default-features = false } +multihash-codetable = { version = "0.1.4", default-features = false } +fvm_ipld_encoding = "0.5.1" [lib] name = "filecoin_cxx" diff --git a/components/filecoin/rs/src/signature.rs b/components/filecoin/rs/src/signature.rs index ab3f38fe1985..0a172254b5c1 100644 --- a/components/filecoin/rs/src/signature.rs +++ b/components/filecoin/rs/src/signature.rs @@ -7,14 +7,14 @@ use crate::message::MessageAPI; use blake2b_simd::Params; use bls_signatures::Serialize; use core::{array::TryFromSliceError, num::ParseIntError}; -use fvm_ipld_encoding::to_vec; use fvm_ipld_encoding::DAG_CBOR; -use fvm_shared::address::set_current_network; +use fvm_ipld_encoding::to_vec; use fvm_shared::address::Network; +use fvm_shared::address::set_current_network; use fvm_shared::crypto::signature::Signature; use fvm_shared::message::Message as UnsignedMessage; use libsecp256k1::util::{SECRET_KEY_SIZE, SIGNATURE_SIZE}; -use multihash::{Code, MultihashDigest}; +use multihash_codetable::{Code, MultihashDigest}; use thiserror::Error; pub struct PrivateKey(pub [u8; SECRET_KEY_SIZE]); diff --git a/patches/tools-crates-gnrt-lib-readme.rs.patch b/patches/tools-crates-gnrt-lib-readme.rs.patch index 8430d3817656..b7eceb4e52bf 100644 --- a/patches/tools-crates-gnrt-lib-readme.rs.patch +++ b/patches/tools-crates-gnrt-lib-readme.rs.patch @@ -1,5 +1,5 @@ diff --git a/tools/crates/gnrt/lib/readme.rs b/tools/crates/gnrt/lib/readme.rs -index 1c7e5fc10fb9b0a8ebc6b3b65431e9a65ece20e9..29928d1201d545ccbe6ba2201095dc0c05c3a79e 100644 +index 1c7e5fc10fb9b0a8ebc6b3b65431e9a65ece20e9..c330f3a3589ead0ccd81f5efeb4f5ce8b219c605 100644 --- a/tools/crates/gnrt/lib/readme.rs +++ b/tools/crates/gnrt/lib/readme.rs 
@@ -150,7 +150,7 @@ pub fn readme_file_from_package<'a>( @@ -16,11 +16,11 @@ index 1c7e5fc10fb9b0a8ebc6b3b65431e9a65ece20e9..29928d1201d545ccbe6ba2201095dc0c // Allowed licenses, in the format they are specified in Cargo.toml files from // crates.io, and the format to write to README.chromium. -static ALLOWED_LICENSES: [(&str, &str); 21] = [ -+static ALLOWED_LICENSES: [(&str, &str); 25] = [ ++static ALLOWED_LICENSES: [(&str, &str); 26] = [ // ("Cargo.toml string", "License for README.chromium") ("Apache-2.0", "Apache 2.0"), ("MIT OR Apache-2.0", "Apache 2.0"), -@@ -235,6 +235,10 @@ static ALLOWED_LICENSES: [(&str, &str); 21] = [ +@@ -235,6 +235,11 @@ static ALLOWED_LICENSES: [(&str, &str); 21] = [ ), ("Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT", "Apache 2.0"), ("BSD-2-Clause OR Apache-2.0 OR MIT", "Apache 2.0"), @@ -28,6 +28,7 @@ index 1c7e5fc10fb9b0a8ebc6b3b65431e9a65ece20e9..29928d1201d545ccbe6ba2201095dc0c + ("MIT OR Apache-2.0 OR BSD-1-Clause", "Apache 2.0"), + ("BSD-2-Clause", "BSD 2-Clause"), + ("MPL-2.0", "Mozilla Public License 2.0"), ++ ("CC0-1.0 OR Apache-2.0", "Apache 2.0"), ]; static EXPECTED_LICENSE_FILE: [(&str, &str); 21] = [ diff --git a/third_party/rust/chromium_crates_io/Cargo.lock b/third_party/rust/chromium_crates_io/Cargo.lock index 240ddc6bdb09..36e7c60eac7f 100644 --- a/third_party/rust/chromium_crates_io/Cargo.lock +++ b/third_party/rust/chromium_crates_io/Cargo.lock @@ -354,7 +354,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64", - "crypto-mac 0.11.1", + "crypto-mac 0.11.0", "curve25519-dalek", "digest 0.10.7", "hmac 0.12.1", @@ -409,7 +409,7 @@ dependencies = [ [[package]] name = "cid" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "core2", @@ -499,7 +499,7 @@ dependencies = [ [[package]] name = "crypto-mac" -version = "0.11.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "generic-array", @@ -732,6 +732,11 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "feed-rs" version = "1.3.0" @@ -804,7 +809,7 @@ dependencies = [ "fvm_ipld_encoding", "fvm_shared", "libsecp256k1", - "multihash", + "multihash-codetable", "serde", "serde_json", "thiserror", @@ -957,23 +962,23 @@ dependencies = [ [[package]] name = "fvm_ipld_blockstore" -version = "0.2.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "anyhow", "cid", - "multihash", + "multihash-codetable", ] [[package]] name = "fvm_ipld_encoding" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "anyhow", "cid", "fvm_ipld_blockstore", - "multihash", + "multihash-codetable", "serde", "serde_ipld_dagcbor", "serde_repr", @@ -983,7 +988,7 @@ dependencies = [ [[package]] name = "fvm_shared" -version = "3.10.0" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "anyhow", @@ -994,7 +999,6 @@ dependencies = [ "data-encoding-macro", "fvm_ipld_encoding", "lazy_static", - "multihash", "num-bigint 0.4.3", "num-derive", "num-integer", @@ -1107,6 +1111,11 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "hex" version = "0.4.3" 
@@ -1134,7 +1143,7 @@ name = "hmac" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crypto-mac 0.11.1", + "crypto-mac 0.11.0", "digest 0.9.0", ] @@ -1233,7 +1242,16 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "equivalent", + "hashbrown 0.15.1", ] [[package]] @@ -1244,6 +1262,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ipld-core" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cid", + "serde", + "serde_bytes", +] + [[package]] name = "itertools" version = "0.10.5" @@ -1307,7 +1335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cssparser 0.27.2", "html5ever", - "indexmap", + "indexmap 1.9.3", "matches", "selectors 0.22.0", ] @@ -1412,7 +1440,7 @@ dependencies = [ "cfg-if", "cssparser 0.27.2", "encoding_rs", - "hashbrown", + "hashbrown 0.12.3", "lazy_static", "lazycell", "memchr", @@ -1481,27 +1509,43 @@ dependencies = [ [[package]] name = "multihash" -version = "0.18.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "blake2b_simd", "core2", - "multihash-derive", "serde", - "serde-big-array", "unsigned-varint", ] +[[package]] +name = "multihash-codetable" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "blake2b_simd", + "core2", + "multihash-derive", +] + [[package]] name = "multihash-derive" -version = "0.8.1" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "core2", + "multihash", + "multihash-derive-impl", +] + +[[package]] +name = "multihash-derive-impl" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro-crate", - "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.79", "synstructure 0.13.1", ] @@ -1823,33 +1867,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "proc-macro-crate" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "thiserror", - "toml", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2", - "quote", - "version_check", + "toml_edit", ] [[package]] @@ -2111,14 +2132,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "serde-big-array" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "serde", -] - [[package]] name = "serde_bytes" version = "0.11.9" @@ -2138,11 +2151,11 @@ dependencies = [ [[package]] name = "serde_ipld_dagcbor" -version = "0.4.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cbor4ii", - "cid", + "ipld-core", "scopeguard", "serde", ] @@ -2412,7 +2425,7 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.1" 
+version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2452,7 +2465,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.79", ] [[package]] @@ -2477,7 +2490,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "thiserror-impl", @@ -2485,12 +2498,12 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.79", ] [[package]] @@ -2544,11 +2557,18 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "toml" -version = "0.5.9" +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "toml_edit" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde", + "indexmap 2.6.0", + "toml_datetime", + "winnow", ] [[package]] @@ -2640,7 +2660,7 @@ dependencies = [ [[package]] name = "unsigned-varint" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2824,6 +2844,14 @@ name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "winnow" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr", +] + [[package]] name = "wyz" version = "0.5.1" @@ -2908,7 +2936,7 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.5" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "zeroize_derive", @@ -2922,7 +2950,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] diff --git a/third_party/rust/chromium_crates_io/gnrt_config.toml b/third_party/rust/chromium_crates_io/gnrt_config.toml index 6443dbeca876..b457ab035b47 100644 --- a/third_party/rust/chromium_crates_io/gnrt_config.toml +++ b/third_party/rust/chromium_crates_io/gnrt_config.toml @@ -120,6 +120,9 @@ extra_input_roots = [ "../README.md" ] extra_input_roots = ['../doc', '../README.md' ] license_files = ['LICENSE.txt'] +[crate.cid] +license_files = ['LICENSE'] + [crate.const-oid] extra_input_roots = [ "../README.md" ] @@ -246,6 +249,9 @@ license_files = ['../../../../../common/licenses/Apache-2.0'] extra_build_script_src_roots = [ "../macros/match_token.rs" ] build_script_outputs = [ "rules.rs" ] +[crate.ipld-core] +extra_input_roots = [ "../README.md" ] + [crate.itoa] license_files = ['LICENSE-APACHE'] @@ -275,7 +281,19 @@ extra_build_script_src_roots = [ "entities.rs" ] build_script_outputs = [ "generated.rs", "named_entities.rs", ] [crate.multihash] -extra_kv = { rustenv = [ "CARGO_MANIFEST_DIR=\" + rebase_path(\"//brave/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1\") + \"" ]} +extra_kv = { rustenv = [ "CARGO_MANIFEST_DIR=\" + rebase_path(\"//brave/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2\") + \"" ]} + +[crate.multihash-codetable] +license_files = ['LICENSE'] +extra_kv = { rustenv = [ + "CARGO=\" + rebase_path(\"//third_party/rust-toolchain/bin/cargo\") + \"", + 
"CARGO_MANIFEST_DIR=\" + rebase_path(\"//brave/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4\") + \"" ]} + +[crate.multihash-derive] +license_files = ['LICENSE'] + +[crate.multihash-derive-impl] +license_files = ['LICENSE'] [crate.num-bigint] extra_input_roots = [ "../README.md" ] @@ -416,6 +434,9 @@ license_files = ['license-apache-2.0'] [crate.windows-targets] license_files = ['license-apache-2.0'] +[crate.winnow] +extra_src_roots = ['../examples'] + [crate.wyz] license_files = ['LICENSE.txt'] diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.cargo_vcs_info.json deleted file mode 100644 index 0db135b24ad9..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "1df4e3fe0f6bcb8845655d2ccefd9da5ef81a1cd" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.github/workflows/build.yml b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.github/workflows/build.yml deleted file mode 100644 index 20260988d503..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.github/workflows/build.yml +++ /dev/null @@ -1,112 +0,0 @@ -name: build - -on: [push, pull_request] - -jobs: - build: - name: Build - strategy: - fail-fast: false - matrix: - platform: [ubuntu-latest, macos-latest, windows-latest] - toolchain: [stable] - runs-on: ${{ matrix.platform }} - - steps: - - name: Checkout Sources - uses: actions/checkout@v3 - - - name: Cache Dependencies & Build Outputs - uses: actions/cache@v3 - with: - path: ~/.cargo - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - - - name: Install Rust Toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: ${{ matrix.toolchain }} - override: true - components: rustfmt, clippy - - - name: Check Code Format - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check - - - name: Code Lint - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all-targets --all-features -- -D warnings - - - name: Test - uses: actions-rs/cargo@v1 - with: - command: test - args: --all-features - - build-no-std: - name: Build no_std - runs-on: ubuntu-latest - steps: - - name: Checkout Sources - uses: actions/checkout@v3 - - - name: Install Rust Toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - target: thumbv6m-none-eabi - - - name: Build - uses: actions-rs/cargo@v1 - with: - command: build - args: --no-default-features --target thumbv6m-none-eabi - - build-no-std-serde: - name: Build no_std, but with `serde-codec` feature enabled - runs-on: ubuntu-latest - steps: - - name: Checkout Sources - uses: actions/checkout@v3 - - - name: Install Rust Toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - - name: Build - uses: actions-rs/cargo@v1 - with: - command: build - # `thumbv6m-none-eabi` can't be used as Serde doesn't compile there. 
- args: --no-default-features --features serde-codec - - coverage: - name: Code Coverage - runs-on: ubuntu-latest - steps: - - name: Checkout Sources - uses: actions/checkout@v3 - - - name: Install Rust Toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - - - name: Generate Code Coverage - uses: actions-rs/tarpaulin@v0.1 - with: - args: --all-features - - - name: Upload Code Coverage - uses: codecov/codecov-action@v3 diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/CHANGELOG.md deleted file mode 100644 index 1ac2f209b7ff..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/CHANGELOG.md +++ /dev/null @@ -1,25 +0,0 @@ -# [v0.10.1](https://github.com/multiformats/rust-cid/compare/v0.10.0...v0.10.1) (2023-01-09) - - -### Bug Fixes - -* the arb feature needs more multihash features ([#133](https://github.com/multiformats/rust-cid/issues/133)) ([ceca4d9](https://github.com/multiformats/rust-cid/commit/ceca4d93bd90f8ac30987bcc5814f6a655484787)) - - -# [v0.10.0](https://github.com/multiformats/rust-cid/compare/v0.9.0...v0.10.0) (2022-12-22) - - -### chore - -* upgrade to Rust edition 2021 and set MSRV ([#130](https://github.com/multiformats/rust-cid/issues/130)) ([91fd35e](https://github.com/multiformats/rust-cid/commit/91fd35e06f8ae24d66f6ba4598830d8dbc259c8a)) - - -### Features - -* add `encoded_len` and written bytes ([#129](https://github.com/multiformats/rust-cid/issues/129)) ([715771c](https://github.com/multiformats/rust-cid/commit/715771c48fd47969e733ed1faad8b82d9ddbd7ca)) - - -### BREAKING CHANGES - -* Return `Result` (instead of `Result<()>`) now from `Cid::write_bytes`. -* Rust edition 2021 is now used diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/Cargo.toml deleted file mode 100644 index 969f65ee6f01..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/Cargo.toml +++ /dev/null @@ -1,113 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -rust-version = "1.60" -name = "cid" -version = "0.10.1" -authors = ["Friedel Ziegelmayer "] -description = "CID in rust" -homepage = "https://github.com/multiformats/rust-cid" -readme = "README.md" -keywords = [ - "ipld", - "ipfs", - "cid", - "multihash", - "multiformats", -] -license = "MIT" - -[dependencies.arbitrary] -version = "1.1.0" -optional = true - -[dependencies.core2] -version = "0.4" -default-features = false - -[dependencies.multibase] -version = "0.9.1" -optional = true -default-features = false - -[dependencies.multihash] -version = "0.18.0" -default-features = false - -[dependencies.parity-scale-codec] -version = "3.0.0" -features = ["derive"] -optional = true -default-features = false - -[dependencies.quickcheck] -version = "1.0" -optional = true - -[dependencies.rand] -version = "0.8.5" -features = ["small_rng"] -optional = true - -[dependencies.serde] -version = "1.0.116" -optional = true -default-features = false - -[dependencies.serde_bytes] -version = "0.11.5" -optional = true - -[dependencies.unsigned-varint] -version = "0.7.0" -default-features = false - -[dev-dependencies.serde_json] -version = "1.0.59" - -[features] -alloc = [ - "multibase", - "multihash/alloc", - "core2/alloc", - "serde/alloc", -] -arb = [ - "quickcheck", - "rand", - "multihash/arb", - "multihash/multihash-impl", - "multihash/sha2", - "arbitrary", -] -default = [ - "std", - "multihash/default", -] -scale-codec = [ - "parity-scale-codec", - "multihash/scale-codec", -] -serde-codec = [ - "alloc", - "serde", - "multihash/serde-codec", - "serde_bytes", -] -std = [ - "multihash/std", - "unsigned-varint/std", - "alloc", - "multibase/std", - "serde/std", -] diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/Cargo.toml.orig deleted file mode 100644 index 2cb24274d653..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/Cargo.toml.orig +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "cid" -version = "0.10.1" -description = "CID in rust" -homepage = "https://github.com/multiformats/rust-cid" -authors = ["Friedel Ziegelmayer "] -keywords = ["ipld", "ipfs", "cid", "multihash", "multiformats"] -license = "MIT" -readme = "README.md" -edition = "2021" -rust-version = "1.60" - -[features] -default = ["std", "multihash/default"] -std = ["multihash/std", "unsigned-varint/std", "alloc", "multibase/std", "serde/std"] -alloc = ["multibase", "multihash/alloc", "core2/alloc", "serde/alloc"] -arb = ["quickcheck", "rand", "multihash/arb", "multihash/multihash-impl", "multihash/sha2", "arbitrary"] -scale-codec = ["parity-scale-codec", "multihash/scale-codec"] -serde-codec = ["alloc", "serde", "multihash/serde-codec", "serde_bytes"] - -[dependencies] -multihash = { version = "0.18.0", default-features = false } -unsigned-varint = { version = "0.7.0", default-features = false } - -multibase = { version = "0.9.1", optional = true, default-features = false } -parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive"], optional = true } -quickcheck = { version = "1.0", optional = true } -rand = { version = "0.8.5", optional = true, features = ["small_rng"]} -serde = { version = "1.0.116", default-features = false, optional = true } -serde_bytes = { version = "0.11.5", optional = true } -arbitrary = { version = "1.1.0", optional = true } - -core2 = { version = "0.4", default-features = false } - -[dev-dependencies] -serde_json = "1.0.59" - diff --git 
a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/README.md b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/README.md deleted file mode 100644 index e49a85e6bb9b..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# rust-cid - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) -[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) - -[![Build Status](https://github.com/multiformats/rust-cid/workflows/build/badge.svg)](https://github.com/multiformats/rust-cid/actions) -[![Crates.io](https://img.shields.io/crates/v/cid?style=flat-square)](https://crates.io/crates/cid) -[![License](https://img.shields.io/crates/l/cid?style=flat-square)](LICENSE) -[![Documentation](https://docs.rs/cid/badge.svg?style=flat-square)](https://docs.rs/cid) -[![Dependency Status](https://deps.rs/repo/github/multiformats/rust-cid/status.svg)](https://deps.rs/repo/github/multiformats/rust-cid) -[![Coverage Status](https://img.shields.io/codecov/c/github/multiformats/rust-cid?style=flat-square)](https://codecov.io/gh/multiformats/rust-cid) - -> [CID](https://github.com/ipld/cid) implementation in Rust. - -## Table of Contents - -- [Usage](#usage) -- [Testing](#testing) -- [Maintainers](#maintainers) -- [Contribute](#contribute) -- [License](#license) - -## Usage - -```rust -use cid::multihash::{Code, MultihashDigest}; -use cid::Cid; -use std::convert::TryFrom; - -const RAW: u64 = 0x55; - -fn main() { - let h = Code::Sha2_256.digest(b"beep boop"); - - let cid = Cid::new_v1(RAW, h); - - let data = cid.to_bytes(); - let out = Cid::try_from(data).unwrap(); - - assert_eq!(cid, out); - - let cid_string = cid.to_string(); - assert_eq!( - cid_string, - "bafkreieq5jui4j25lacwomsqgjeswwl3y5zcdrresptwgmfylxo2depppq" - ); - println!("{}", cid_string); -} -``` - -Your `Cargo.toml` needs these dependencies: - -```toml -[dependencies] -cid = "0.7.0" -``` - -You can also run this example from this checkout with `cargo run --example readme`. - -## Testing - -You can run the tests using this command: `cargo test --all-features` - -## Maintainers - -Captain: [@dignifiedquire](https://github.com/dignifiedquire). - -## Contribute - -Contributions welcome. Please check out [the issues](https://github.com/multiformats/rust-cid/issues). - -Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). - -Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 
- -## License - -[MIT](LICENSE) © 2017 Friedel Ziegelmayer diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/examples/readme.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/examples/readme.rs deleted file mode 100644 index 81308471be57..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/examples/readme.rs +++ /dev/null @@ -1,23 +0,0 @@ -use cid::multihash::{Code, MultihashDigest}; -use cid::Cid; -use std::convert::TryFrom; - -const RAW: u64 = 0x55; - -fn main() { - let h = Code::Sha2_256.digest(b"beep boop"); - - let cid = Cid::new_v1(RAW, h); - - let data = cid.to_bytes(); - let out = Cid::try_from(data).unwrap(); - - assert_eq!(cid, out); - - let cid_string = cid.to_string(); - assert_eq!( - cid_string, - "bafkreieq5jui4j25lacwomsqgjeswwl3y5zcdrresptwgmfylxo2depppq" - ); - println!("{}", cid_string); -} diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/arb.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/arb.rs deleted file mode 100644 index 63a69e8c1885..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/arb.rs +++ /dev/null @@ -1,118 +0,0 @@ -#![cfg(feature = "arb")] - -use std::convert::TryFrom; - -use multihash::{Code, MultihashDigest, MultihashGeneric}; -use quickcheck::Gen; -use rand::{ - distributions::{weighted::WeightedIndex, Distribution}, - Rng, -}; - -use arbitrary::{size_hint, Unstructured}; -use rand::SeedableRng; - -use crate::{CidGeneric, Version}; - -impl quickcheck::Arbitrary for Version { - fn arbitrary(g: &mut Gen) -> Self { - let version = u64::from(bool::arbitrary(g)); - Version::try_from(version).unwrap() - } -} - -impl<const S: usize> quickcheck::Arbitrary for CidGeneric<S> { - fn arbitrary(g: &mut Gen) -> Self { - if S >= 32 && Version::arbitrary(g) == Version::V0 { - let data: Vec<u8> = Vec::arbitrary(g); - let hash = Code::Sha2_256 - .digest(&data) - .resize() - .expect("digest too large"); - CidGeneric::new_v0(hash).expect("sha2_256 is a valid hash for cid v0") - } else { - // In real world lower IPLD Codec codes more likely to happen, hence distribute them - // with bias towards smaller values. - let weights = [128, 32, 4, 4, 2, 2, 1, 1]; - let dist = WeightedIndex::new(weights.iter()).unwrap(); - let mut rng = rand::rngs::SmallRng::seed_from_u64(u64::arbitrary(g)); - let codec = match dist.sample(&mut rng) { - 0 => rng.gen_range(0..u64::pow(2, 7)), - 1 => rng.gen_range(u64::pow(2, 7)..u64::pow(2, 14)), - 2 => rng.gen_range(u64::pow(2, 14)..u64::pow(2, 21)), - 3 => rng.gen_range(u64::pow(2, 21)..u64::pow(2, 28)), - 4 => rng.gen_range(u64::pow(2, 28)..u64::pow(2, 35)), - 5 => rng.gen_range(u64::pow(2, 35)..u64::pow(2, 42)), - 6 => rng.gen_range(u64::pow(2, 42)..u64::pow(2, 49)), - 7 => rng.gen_range(u64::pow(2, 56)..u64::pow(2, 63)), - _ => unreachable!(), - }; - - let hash: MultihashGeneric<S> = quickcheck::Arbitrary::arbitrary(g); - CidGeneric::new_v1(codec, hash) - } - } -} - -impl<'a, const S: usize> arbitrary::Arbitrary<'a> for CidGeneric<S> { - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result<Self> { - if S >= 32 && u.ratio(1, 10)? { - let mh = MultihashGeneric::wrap(Code::Sha2_256.into(), u.bytes(32)?).unwrap(); - return Ok(CidGeneric::new_v0(mh).expect("32 bytes is correct for v0")); - } - - let mut codec = 0u64; - let mut len_choice = u.arbitrary::<u8>()?
| 1; - - while len_choice & 1 == 1 { - len_choice >>= 1; - - let x = u.arbitrary::<u8>(); - let next = codec - .checked_shl(8) - .zip(x.ok()) - .map(|(next, x)| next.saturating_add(x as u64)); - - match next { - None => break, - Some(next) => codec = next, - } - } - - Ok(CidGeneric::new_v1(codec, u.arbitrary()?)) - } - - fn size_hint(depth: usize) -> (usize, Option<usize>) { - let v1 = size_hint::and_all(&[ - <[u8; 2]>::size_hint(depth), - (0, Some(8)), - <MultihashGeneric<S> as arbitrary::Arbitrary>::size_hint(depth), - ]); - if S >= 32 { - size_hint::and(::size_hint(depth), size_hint::or((32, Some(32)), v1)) - } else { - v1 - } - } -} - -#[cfg(test)] -mod tests { - use crate::CidGeneric; - use arbitrary::{Arbitrary, Unstructured}; - use multihash::MultihashGeneric; - - #[test] - fn arbitrary() { - let mut u = Unstructured::new(&[ - 1, 22, 41, 13, 5, 6, 7, 8, 9, 6, 10, 243, 43, 231, 123, 43, 153, 127, 67, 76, 24, 91, - 23, 32, 32, 23, 65, 98, 193, 108, 3, - ]); - let c = <CidGeneric<16> as Arbitrary>::arbitrary(&mut u).unwrap(); - let c2 = - CidGeneric::<16>::new_v1(22, MultihashGeneric::wrap(13, &[6, 7, 8, 9, 6]).unwrap()); - assert_eq!(c.hash(), c2.hash()); - assert_eq!(c.codec(), c2.codec()); - assert_eq!(c, c2) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/cid.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/cid.rs deleted file mode 100644 index 95614cd24fe0..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/cid.rs +++ /dev/null @@ -1,424 +0,0 @@ -//! This module contains the main CID type. -//! -//! If you are an application developer you likely won't use the `Cid` which is generic over the -//! digest size. Intead you would use the concrete top-level `Cid` type. -//! -//! As a library author that works with CIDs that should support hashes of anysize, you would -//! import the `Cid` type from this module. -use core::convert::TryFrom; - -#[cfg(feature = "alloc")] -use multibase::{encode as base_encode, Base}; - -use multihash::MultihashGeneric as Multihash; -use unsigned_varint::encode as varint_encode; - -#[cfg(feature = "alloc")] -extern crate alloc; - -#[cfg(feature = "alloc")] -use alloc::{ - borrow, - string::{String, ToString}, - vec::Vec, -}; - -#[cfg(feature = "std")] -pub(crate) use unsigned_varint::io::read_u64 as varint_read_u64; - -/// Reads 64 bits from a byte array into a u64 -/// Adapted from unsigned-varint's generated read_u64 function at -/// https://github.com/paritytech/unsigned-varint/blob/master/src/io.rs -#[cfg(not(feature = "std"))] -pub(crate) fn varint_read_u64<R: io::Read>(mut r: R) -> Result<u64> { - use unsigned_varint::decode; - let mut b = varint_encode::u64_buffer(); - for i in 0..b.len() { - let n = r.read(&mut (b[i..i + 1]))?; - if n == 0 { - return Err(Error::VarIntDecodeError); - } else if decode::is_last(b[i]) { - return Ok(decode::u64(&b[..=i]).unwrap().0); - } - } - Err(Error::VarIntDecodeError) -} - -#[cfg(feature = "std")] -use std::io; - -#[cfg(not(feature = "std"))] -use core2::io; - -use crate::error::{Error, Result}; -use crate::version::Version; - -/// DAG-PB multicodec code -const DAG_PB: u64 = 0x70; -/// The SHA_256 multicodec code -const SHA2_256: u64 = 0x12; - -/// Representation of a CID. -/// -/// The generic is about the allocated size of the multihash. -#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "scale-codec", derive(parity_scale_codec::Decode))] -#[cfg_attr(feature = "scale-codec", derive(parity_scale_codec::Encode))] -pub struct Cid<const S: usize> { - /// The version of CID.
- version: Version, - /// The codec of CID. - codec: u64, - /// The multihash of CID. - hash: Multihash<S>, -} - -impl<const S: usize> Cid<S> { - /// Create a new CIDv0. - pub const fn new_v0(hash: Multihash<S>) -> Result<Self> { - if hash.code() != SHA2_256 || hash.size() != 32 { - return Err(Error::InvalidCidV0Multihash); - } - Ok(Self { - version: Version::V0, - codec: DAG_PB, - hash, - }) - } - - /// Create a new CIDv1. - pub const fn new_v1(codec: u64, hash: Multihash<S>) -> Self { - Self { - version: Version::V1, - codec, - hash, - } - } - - /// Create a new CID. - pub const fn new(version: Version, codec: u64, hash: Multihash<S>) -> Result<Self> { - match version { - Version::V0 => { - if codec != DAG_PB { - return Err(Error::InvalidCidV0Codec); - } - Self::new_v0(hash) - } - Version::V1 => Ok(Self::new_v1(codec, hash)), - } - } - - /// Convert a CIDv0 to a CIDv1. Returns unchanged if already a CIDv1. - pub fn into_v1(self) -> Result<Self> { - match self.version { - Version::V0 => { - if self.codec != DAG_PB { - return Err(Error::InvalidCidV0Codec); - } - Ok(Self::new_v1(self.codec, self.hash)) - } - Version::V1 => Ok(self), - } - } - - /// Returns the cid version. - pub const fn version(&self) -> Version { - self.version - } - - /// Returns the cid codec. - pub const fn codec(&self) -> u64 { - self.codec - } - - /// Returns the cid multihash. - pub const fn hash(&self) -> &Multihash<S> { - &self.hash - } - - /// Reads the bytes from a byte stream. - pub fn read_bytes<R: io::Read>(mut r: R) -> Result<Self> { - let version = varint_read_u64(&mut r)?; - let codec = varint_read_u64(&mut r)?; - - // CIDv0 has the fixed `0x12 0x20` prefix - if [version, codec] == [0x12, 0x20] { - let mut digest = [0u8; 32]; - r.read_exact(&mut digest)?; - let mh = Multihash::wrap(version, &digest).expect("Digest is always 32 bytes."); - return Self::new_v0(mh); - } - - let version = Version::try_from(version)?; - match version { - Version::V0 => Err(Error::InvalidExplicitCidV0), - Version::V1 => { - let mh = Multihash::read(r)?; - Self::new(version, codec, mh) - } - } - } - - fn write_bytes_v1<W: io::Write>(&self, mut w: W) -> Result<usize> { - let mut version_buf = varint_encode::u64_buffer(); - let version = varint_encode::u64(self.version.into(), &mut version_buf); - - let mut codec_buf = varint_encode::u64_buffer(); - let codec = varint_encode::u64(self.codec, &mut codec_buf); - - let mut written = version.len() + codec.len(); - - w.write_all(version)?; - w.write_all(codec)?; - written += self.hash.write(&mut w)?; - - Ok(written) - } - - /// Writes the bytes to a byte stream, returns the number of bytes written. - pub fn write_bytes<W: io::Write>(&self, w: W) -> Result<usize> { - let written = match self.version { - Version::V0 => self.hash.write(w)?, - Version::V1 => self.write_bytes_v1(w)?, - }; - Ok(written) - } - - /// Returns the length in bytes needed to encode this cid into bytes. - pub fn encoded_len(&self) -> usize { - match self.version { - Version::V0 => self.hash.encoded_len(), - Version::V1 => { - let mut version_buf = varint_encode::u64_buffer(); - let version = varint_encode::u64(self.version.into(), &mut version_buf); - - let mut codec_buf = varint_encode::u64_buffer(); - let codec = varint_encode::u64(self.codec, &mut codec_buf); - - version.len() + codec.len() + self.hash.encoded_len() - } - } - } - - /// Returns the encoded bytes of the `Cid`.
- #[cfg(feature = "alloc")] - pub fn to_bytes(&self) -> Vec<u8> { - let mut bytes = Vec::new(); - let written = self.write_bytes(&mut bytes).unwrap(); - debug_assert_eq!(written, bytes.len()); - bytes - } - - #[cfg(feature = "alloc")] - #[allow(clippy::wrong_self_convention)] - fn to_string_v0(&self) -> String { - Base::Base58Btc.encode(self.hash.to_bytes()) - } - - #[cfg(feature = "alloc")] - #[allow(clippy::wrong_self_convention)] - fn to_string_v1(&self) -> String { - multibase::encode(Base::Base32Lower, self.to_bytes().as_slice()) - } - - /// Convert CID into a multibase encoded string - /// - /// # Example - /// - /// ``` - /// use cid::Cid; - /// use multibase::Base; - /// use multihash::{Code, MultihashDigest}; - /// - /// const RAW: u64 = 0x55; - /// - /// let cid = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); - /// let encoded = cid.to_string_of_base(Base::Base64).unwrap(); - /// assert_eq!(encoded, "mAVUSICwmtGto/8aP+ZtFPB0wQTQTQi1wZIO/oPmKXohiZueu"); - /// ``` - #[cfg(feature = "alloc")] - pub fn to_string_of_base(&self, base: Base) -> Result<String> { - match self.version { - Version::V0 => { - if base == Base::Base58Btc { - Ok(self.to_string_v0()) - } else { - Err(Error::InvalidCidV0Base) - } - } - Version::V1 => Ok(base_encode(base, self.to_bytes())), - } - } -} - -impl<const S: usize> Default for Cid<S> { - fn default() -> Self { - Self { - version: Version::V1, - codec: 0, - hash: Multihash::<S>::default(), - } - } -} - -// TODO: remove the dependency on alloc by fixing -// https://github.com/multiformats/rust-multibase/issues/33 -#[cfg(feature = "alloc")] -impl<const S: usize> core::fmt::Display for Cid<S> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let output = match self.version { - Version::V0 => self.to_string_v0(), - Version::V1 => self.to_string_v1(), - }; - write!(f, "{}", output) - } -} - -#[cfg(feature = "alloc")] -impl<const S: usize> core::fmt::Debug for Cid<S> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - if f.alternate() { - f.debug_struct("Cid") - .field("version", &self.version()) - .field("codec", &self.codec()) - .field("hash", self.hash()) - .finish() - } else { - let output = match self.version { - Version::V0 => self.to_string_v0(), - Version::V1 => self.to_string_v1(), - }; - write!(f, "Cid({})", output) - } - } -} - -#[cfg(feature = "alloc")] -impl<const S: usize> core::str::FromStr for Cid<S> { - type Err = Error; - - fn from_str(cid_str: &str) -> Result<Self> { - Self::try_from(cid_str) - } -} - -#[cfg(feature = "alloc")] -impl<const S: usize> TryFrom<String> for Cid<S> { - type Error = Error; - - fn try_from(cid_str: String) -> Result<Self> { - Self::try_from(cid_str.as_str()) - } -} - -#[cfg(feature = "alloc")] -impl<const S: usize> TryFrom<&str> for Cid<S> { - type Error = Error; - - fn try_from(cid_str: &str) -> Result<Self> { - static IPFS_DELIMETER: &str = "/ipfs/"; - - let hash = match cid_str.find(IPFS_DELIMETER) { - Some(index) => &cid_str[index + IPFS_DELIMETER.len()..], - _ => cid_str, - }; - - if hash.len() < 2 { - return Err(Error::InputTooShort); - } - - let decoded = if Version::is_v0_str(hash) { - Base::Base58Btc.decode(hash)?
- } else { - let (_, decoded) = multibase::decode(hash)?; - decoded - }; - - Self::try_from(decoded) - } -} - -#[cfg(feature = "alloc")] -impl<const S: usize> TryFrom<Vec<u8>> for Cid<S> { - type Error = Error; - - fn try_from(bytes: Vec<u8>) -> Result<Self> { - Self::try_from(bytes.as_slice()) - } -} - -impl<const S: usize> TryFrom<&[u8]> for Cid<S> { - type Error = Error; - - fn try_from(mut bytes: &[u8]) -> Result<Self> { - Self::read_bytes(&mut bytes) - } -} - -impl<const S: usize> From<&Cid<S>> for Cid<S> { - fn from(cid: &Cid<S>) -> Self { - *cid - } -} - -#[cfg(feature = "alloc")] -impl<const S: usize> From<Cid<S>> for Vec<u8> { - fn from(cid: Cid<S>) -> Self { - cid.to_bytes() - } -} - -#[cfg(feature = "alloc")] -impl<const S: usize> From<Cid<S>> for String { - fn from(cid: Cid<S>) -> Self { - cid.to_string() - } -} - -#[cfg(feature = "alloc")] -impl<'a, const S: usize> From<Cid<S>> for borrow::Cow<'a, Cid<S>> { - fn from(from: Cid<S>) -> Self { - borrow::Cow::Owned(from) - } -} - -#[cfg(feature = "alloc")] -impl<'a, const S: usize> From<&'a Cid<S>> for borrow::Cow<'a, Cid<S>> { - fn from(from: &'a Cid<S>) -> Self { - borrow::Cow::Borrowed(from) - } -} - -#[cfg(test)] -mod tests { - #[test] - #[cfg(feature = "scale-codec")] - fn test_cid_scale_codec() { - use super::Cid; - use parity_scale_codec::{Decode, Encode}; - - let cid = Cid::<64>::default(); - let bytes = cid.encode(); - let cid2 = Cid::decode(&mut &bytes[..]).unwrap(); - assert_eq!(cid, cid2); - } - - #[test] - #[cfg(feature = "std")] - fn test_debug_instance() { - use super::Cid; - use std::str::FromStr; - let cid = - Cid::<64>::from_str("bafyreibjo4xmgaevkgud7mbifn3dzp4v4lyaui4yvqp3f2bqwtxcjrdqg4") - .unwrap(); - // short debug - assert_eq!( - &format!("{:?}", cid), - "Cid(bafyreibjo4xmgaevkgud7mbifn3dzp4v4lyaui4yvqp3f2bqwtxcjrdqg4)" - ); - // verbose debug - let mut txt = format!("{:#?}", cid); - txt.retain(|c| !c.is_whitespace()); - assert_eq!(&txt, "Cid{version:V1,codec:113,hash:Multihash{code:18,size:32,digest:[41,119,46,195,0,149,81,168,63,176,40,43,118,60,191,149,226,240,10,35,152,172,31,178,232,48,180,238,36,196,112,55,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,],},}"); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/lib.rs deleted file mode 100644 index ee839d1c7c29..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/lib.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! # cid -//! -//! Implementation of [cid](https://github.com/ipld/cid) in Rust. - -#![deny(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -mod cid; -mod error; -mod version; - -#[cfg(any(test, feature = "arb"))] -mod arb; -#[cfg(feature = "serde-codec")] -pub mod serde; - -pub use self::cid::Cid as CidGeneric; -pub use self::error::{Error, Result}; -pub use self::version::Version; - -#[cfg(feature = "alloc")] -pub use multibase; -pub use multihash; - -/// A Cid that contains a multihash with an allocated size of 512 bits. -/// -/// This is the same digest size the default multihash code table has. -/// -/// If you need a CID that is generic over its digest size, use [`CidGeneric`] instead.
-pub type Cid = CidGeneric<64>; diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/tests/lib.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/tests/lib.rs deleted file mode 100644 index eb807120bd19..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/tests/lib.rs +++ /dev/null @@ -1,207 +0,0 @@ -use std::collections::HashMap; -use std::convert::{TryFrom, TryInto}; -use std::str::FromStr; - -use cid::{Cid, CidGeneric, Error, Version}; -use multibase::Base; -use multihash::{derive::Multihash, Code, MultihashDigest}; - -const RAW: u64 = 0x55; -const DAG_PB: u64 = 0x70; - -#[test] -fn basic_marshalling() { - let h = Code::Sha2_256.digest(b"beep boop"); - - let cid = Cid::new_v1(DAG_PB, h); - - let data = cid.to_bytes(); - let out = Cid::try_from(data.clone()).unwrap(); - assert_eq!(cid, out); - - let out2 = data.try_into().unwrap(); - assert_eq!(cid, out2); - - let s = cid.to_string(); - let out3 = Cid::try_from(&s[..]).unwrap(); - assert_eq!(cid, out3); - - let out4 = (&s[..]).try_into().unwrap(); - assert_eq!(cid, out4); -} - -#[test] -fn empty_string() { - assert!(matches!(Cid::try_from(""), Err(Error::InputTooShort))) -} - -#[test] -fn v0_handling() { - let old = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"; - let cid = Cid::try_from(old).unwrap(); - - assert_eq!(cid.version(), Version::V0); - assert_eq!(cid.to_string(), old); -} - -#[test] -fn from_str() { - let cid: Cid = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" - .parse() - .unwrap(); - assert_eq!(cid.version(), Version::V0); - - let bad = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zIII".parse::<Cid>(); - assert!(matches!(bad, Err(Error::ParsingError))); -} - -#[test] -fn v0_error() { - let bad = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zIII"; - assert!(matches!(Cid::try_from(bad), Err(Error::ParsingError))); -} - -#[test] -fn from() { - let the_hash = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"; - - let cases = vec![ - format!("/ipfs/{:}", &the_hash), - format!("https://ipfs.io/ipfs/{:}", &the_hash), - format!("http://localhost:8080/ipfs/{:}", &the_hash), - ]; - - for case in cases { - let cid = Cid::try_from(case).unwrap(); - assert_eq!(cid.version(), Version::V0); - assert_eq!(cid.to_string(), the_hash); - } -} - -#[test] -fn test_hash() { - let data: Vec<u8> = vec![1, 2, 3]; - let hash = Code::Sha2_256.digest(&data); - let mut map = HashMap::new(); - let cid = Cid::new_v0(hash).unwrap(); - map.insert(cid, data.clone()); - assert_eq!(&data, map.get(&cid).unwrap()); -} - -#[test] -fn test_base32() { - let cid = Cid::from_str("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap(); - assert_eq!(cid.version(), Version::V1); - assert_eq!(cid.codec(), RAW); - assert_eq!(cid.hash(), &Code::Sha2_256.digest(b"foo")); -} - -#[test] -fn to_string() { - let expected_cid = "bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy"; - let cid = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); - assert_eq!(cid.to_string(), expected_cid); -} - -#[test] -fn to_string_of_base32() { - let expected_cid = "bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy"; - let cid = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); - assert_eq!( - cid.to_string_of_base(Base::Base32Lower).unwrap(), - expected_cid - ); -} - -#[test] -fn to_string_of_base64() { - let expected_cid = "mAVUSICwmtGto/8aP+ZtFPB0wQTQTQi1wZIO/oPmKXohiZueu"; - let cid = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); - assert_eq!(cid.to_string_of_base(Base::Base64).unwrap(), expected_cid); -} - -#[test] -fn
to_string_of_base58_v0() { - let expected_cid = "QmRJzsvyCQyizr73Gmms8ZRtvNxmgqumxc2KUp71dfEmoj"; - let cid = Cid::new_v0(Code::Sha2_256.digest(b"foo")).unwrap(); - assert_eq!( - cid.to_string_of_base(Base::Base58Btc).unwrap(), - expected_cid - ); -} - -#[test] -fn to_string_of_base_v0_error() { - let cid = Cid::new_v0(Code::Sha2_256.digest(b"foo")).unwrap(); - assert!(matches!( - cid.to_string_of_base(Base::Base16Upper), - Err(Error::InvalidCidV0Base) - )); -} - -#[test] -fn explicit_v0_is_disallowed() { - use std::io::Cursor; - assert!(matches!( - Cid::read_bytes(Cursor::new([ - 0x00, 0x70, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12 - ])), - Err(Error::InvalidExplicitCidV0) - )); -} - -#[test] -fn new_v0_accepts_only_32_bytes() { - use multihash::Multihash; - const SHA2_256: u64 = 0x12; - - for i in 1..64 { - if i == 32 { - continue; - } - assert!(matches!( - Cid::new_v0(Multihash::wrap(SHA2_256, &vec![7; i]).unwrap()), - Err(Error::InvalidCidV0Multihash) - )); - } -} - -fn a_function_that_takes_a_generic_cid<const S: usize>(cid: &CidGeneric<S>) -> String { - cid.to_string() -} - -// This test is about having something implemented that used the default size of `Cid`. So the code -// is using `Cid` instead of `Cid`. The code will still work with other sizes. -#[test] -fn method_can_take_differently_sized_cids() { - #[derive(Clone, Copy, Debug, Eq, PartialEq, Multihash)] - #[mh(alloc_size = 128)] - enum Code128 { - #[mh(code = 0x12, hasher = multihash::Sha2_256)] - Sha2_256, - } - - let cid_default = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); - let cid_128 = CidGeneric::<128>::new_v1(RAW, Code128::Sha2_256.digest(b"foo")); - - assert_eq!( - a_function_that_takes_a_generic_cid(&cid_default), - a_function_that_takes_a_generic_cid(&cid_128) - ); -} - -#[test] -fn test_into_v1() { - let cid = Cid::from_str("QmTPcW343HGMdoxarwvHHoPhkbo5GfNYjnZkyW5DBtpvLe").unwrap(); - let cid_v1 = cid.into_v1().unwrap(); - assert_eq!(cid_v1.version(), Version::V1); - assert_eq!( - cid_v1.to_string(), - "bafybeiclbsxcvqpfliqcejqz5ghpvw4r7vktjkyk3ruvjvdmam5azct2v4" - ); - - let cid = Cid::from_str("bafyreibjo4xmgaevkgud7mbifn3dzp4v4lyaui4yvqp3f2bqwtxcjrdqg4").unwrap(); - let cid_v1 = cid.into_v1().unwrap(); - assert_eq!(cid_v1.version(), Version::V1); - assert_eq!(cid_v1, cid); -} diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..375091676937 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "7a2c4dd78e658a7d27e8b61d2de5b9f66148ace2" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.github/codecov.yml b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.github/codecov.yml similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.github/codecov.yml rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.github/codecov.yml diff --git
a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.github/dependabot.yml b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.github/dependabot.yml similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.github/dependabot.yml rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.github/dependabot.yml diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.github/workflows/build.yml b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.github/workflows/build.yml new file mode 100644 index 000000000000..0527a3552a2b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.github/workflows/build.yml @@ -0,0 +1,94 @@ +name: build + +on: [push, pull_request] + +jobs: + build: + name: Build + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, macos-latest, windows-latest] + toolchain: [stable] + runs-on: ${{ matrix.platform }} + + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Cache Dependencies & Build Outputs + uses: actions/cache@v4 + with: + path: ~/.cargo + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.toolchain }} + components: rustfmt, clippy + + - name: Check Code Format + run: cargo fmt --all -- --check + shell: bash + + - name: Code Lint + run: cargo clippy --all-targets --all-features -- -D warnings + shell: bash + + - name: Test --no-default-features + run: cargo test --no-default-features + shell: bash + + - name: Test + run: cargo test --all-features + shell: bash + + build-no-std: + name: Build no_std + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + with: + targets: thumbv6m-none-eabi + + - name: Build + run: cargo build --no-default-features --target thumbv6m-none-eabi + shell: bash + + build-no-std-serde: + name: Build no_std, but with `serde-codec` feature enabled + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Build + # `thumbv6m-none-eabi` can't be used as Serde doesn't compile there. 
+ run: cargo build --no-default-features --features serde-codec + shell: bash + + coverage: + name: Code Coverage + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Generate Code Coverage + run: | + cargo install cargo-tarpaulin + cargo tarpaulin --all-features --out Xml + + - name: Upload Code Coverage + uses: codecov/codecov-action@v4 diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.gitignore b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.gitignore similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/cid-0.10.1/.gitignore rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/.gitignore diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/CHANGELOG.md new file mode 100644 index 000000000000..ded919c7aec3 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/CHANGELOG.md @@ -0,0 +1,52 @@ +# [v0.11.1](https://github.com/multiformats/rust-cid/compare/v0.11.0...v0.11.1) (2024-03-01) + + +### Bug Fixes + +* wrong serde-codec feature gate ([#156](https://github.com/multiformats/rust-cid/issues/156)) ([9699963](https://github.com/multiformats/rust-cid/commit/96999637942c4bd4c1d69fa2a8fcfb8c225ff27c)) + + +# [v0.11.0](https://github.com/multiformats/rust-cid/compare/v0.10.1...v0.11.0) (2023-11-14) + + +### Bug Fixes + +* `varint_read_u64` panics on unwrap in `no_std` environment ([#145](https://github.com/multiformats/rust-cid/issues/145)) ([86c7912](https://github.com/multiformats/rust-cid/commit/86c79126d851316350ad106d0df3e4ae69071874)) +* features accidentally pull in optional dependency ([#147](https://github.com/multiformats/rust-cid/issues/147)) ([942b70e](https://github.com/multiformats/rust-cid/commit/942b70ebd970b9c4a2f330a109227282e7596d29)), closes [#142](https://github.com/multiformats/rust-cid/issues/142) + + +### Features + +* update to multihash 0.19 ([#140](https://github.com/multiformats/rust-cid/issues/140)) ([27b112d](https://github.com/multiformats/rust-cid/commit/27b112d2e6a8a1532f5a1c4ead2cc2e5a68b5dd5)) + + +### BREAKING CHANGES + +* Re-exported multihash changed. The multihash v0.19 release split it into several smaller crates, the `multihash` crate now has less functionality. + + +# [v0.10.1](https://github.com/multiformats/rust-cid/compare/v0.10.0...v0.10.1) (2023-01-09) + + +### Bug Fixes + +* the arb feature needs more multihash features ([#133](https://github.com/multiformats/rust-cid/issues/133)) ([ceca4d9](https://github.com/multiformats/rust-cid/commit/ceca4d93bd90f8ac30987bcc5814f6a655484787)) + + +# [v0.10.0](https://github.com/multiformats/rust-cid/compare/v0.9.0...v0.10.0) (2022-12-22) + + +### chore + +* upgrade to Rust edition 2021 and set MSRV ([#130](https://github.com/multiformats/rust-cid/issues/130)) ([91fd35e](https://github.com/multiformats/rust-cid/commit/91fd35e06f8ae24d66f6ba4598830d8dbc259c8a)) + + +### Features + +* add `encoded_len` and written bytes ([#129](https://github.com/multiformats/rust-cid/issues/129)) ([715771c](https://github.com/multiformats/rust-cid/commit/715771c48fd47969e733ed1faad8b82d9ddbd7ca)) + + +### BREAKING CHANGES + +* Return `Result` (instead of `Result<()>`) now from `Cid::write_bytes`. 
+* Rust edition 2021 is now used diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/Cargo.toml new file mode 100644 index 000000000000..6304e27abf4d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/Cargo.toml @@ -0,0 +1,121 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60" +name = "cid" +version = "0.11.1" +authors = ["Friedel Ziegelmayer "] +description = "CID in rust" +homepage = "https://github.com/multiformats/rust-cid" +readme = "README.md" +keywords = [ + "ipld", + "ipfs", + "cid", + "multihash", + "multiformats", +] +license = "MIT" + +[dependencies.arbitrary] +version = "1.1.0" +optional = true + +[dependencies.core2] +version = "0.4" +default-features = false + +[dependencies.multibase] +version = "0.9.1" +optional = true +default-features = false + +[dependencies.multihash] +version = "0.19.0" +default-features = false + +[dependencies.parity-scale-codec] +version = "3.0.0" +features = ["derive"] +optional = true +default-features = false + +[dependencies.quickcheck] +version = "1.0" +optional = true + +[dependencies.rand] +version = "0.8.5" +features = ["small_rng"] +optional = true + +[dependencies.serde] +version = "1.0.116" +optional = true +default-features = false + +[dependencies.serde_bytes] +version = "0.11.5" +optional = true + +[dependencies.unsigned-varint] +version = "0.8.0" +default-features = false + +[dev-dependencies.multihash-codetable] +version = "0.1.0" +features = [ + "digest", + "sha2", +] +default-features = false + +[dev-dependencies.multihash-derive] +version = "0.9.0" +default-features = false + +[dev-dependencies.serde_json] +version = "1.0.59" +features = ["alloc"] +default-features = false + +[features] +alloc = [ + "dep:multibase", + "core2/alloc", + "multihash/alloc", +] +arb = [ + "dep:arbitrary", + "dep:quickcheck", + "dep:rand", + "multihash/arb", +] +default = ["std"] +scale-codec = [ + "dep:parity-scale-codec", + "multihash/scale-codec", +] +serde = [ + "alloc", + "dep:serde", + "dep:serde_bytes", + "multihash/serde", +] +serde-codec = ["serde"] +std = [ + "alloc", + "core2/alloc", + "multihash/std", + "unsigned-varint/std", +] diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/Cargo.toml.orig new file mode 100644 index 000000000000..5a6167affc92 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/Cargo.toml.orig @@ -0,0 +1,39 @@ +[package] +name = "cid" +version = "0.11.1" +description = "CID in rust" +homepage = "https://github.com/multiformats/rust-cid" +authors = ["Friedel Ziegelmayer "] +keywords = ["ipld", "ipfs", "cid", "multihash", "multiformats"] +license = "MIT" +readme = "README.md" +edition = "2021" +rust-version = "1.60" + +[features] +default = ["std"] +std = ["alloc", "core2/alloc", "multihash/std", "unsigned-varint/std"] +alloc = ["dep:multibase", "core2/alloc", "multihash/alloc"] +arb = ["dep:arbitrary", "dep:quickcheck", "dep:rand", 
"multihash/arb"] +scale-codec = ["dep:parity-scale-codec", "multihash/scale-codec"] +serde-codec = ["serde"] # Deprecated, don't use. +serde = ["alloc", "dep:serde", "dep:serde_bytes", "multihash/serde"] + +[dependencies] +multihash = { version = "0.19.0", default-features = false } +unsigned-varint = { version = "0.8.0", default-features = false } + +multibase = { version = "0.9.1", optional = true, default-features = false } +parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive"], optional = true } +quickcheck = { version = "1.0", optional = true } +rand = { version = "0.8.5", optional = true, features = ["small_rng"]} +serde = { version = "1.0.116", default-features = false, optional = true } +serde_bytes = { version = "0.11.5", optional = true } +arbitrary = { version = "1.1.0", optional = true } + +core2 = { version = "0.4", default-features = false } + +[dev-dependencies] +multihash-derive = { version = "0.9.0", default-features = false } +serde_json = { version = "1.0.59", default-features = false, features = ["alloc"]} +multihash-codetable = { version = "0.1.0", default-features = false, features = ["digest", "sha2"] } diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/LICENSE b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/LICENSE similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/LICENSE rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/LICENSE diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/README.md b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/README.md new file mode 100644 index 000000000000..5421ae0c03a7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/README.md @@ -0,0 +1,82 @@ +# rust-cid + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) +[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) + +[![Build Status](https://github.com/multiformats/rust-cid/workflows/build/badge.svg)](https://github.com/multiformats/rust-cid/actions) +[![Crates.io](https://img.shields.io/crates/v/cid?style=flat-square)](https://crates.io/crates/cid) +[![License](https://img.shields.io/crates/l/cid?style=flat-square)](LICENSE) +[![Documentation](https://docs.rs/cid/badge.svg?style=flat-square)](https://docs.rs/cid) +[![Dependency Status](https://deps.rs/repo/github/multiformats/rust-cid/status.svg)](https://deps.rs/repo/github/multiformats/rust-cid) +[![Coverage Status](https://img.shields.io/codecov/c/github/multiformats/rust-cid?style=flat-square)](https://codecov.io/gh/multiformats/rust-cid) + +> [CID](https://github.com/ipld/cid) implementation in Rust. 
+ +## Table of Contents + +- [Usage](#usage) +- [Testing](#testing) +- [Maintainers](#maintainers) +- [Contribute](#contribute) +- [License](#license) + +## Usage + +```rust +use multihash_codetable::{Code, MultihashDigest}; +use cid::Cid; +use std::convert::TryFrom; + +const RAW: u64 = 0x55; + +fn main() { + let h = Code::Sha2_256.digest(b"beep boop"); + + let cid = Cid::new_v1(RAW, h); + + let data = cid.to_bytes(); + let out = Cid::try_from(data).unwrap(); + + assert_eq!(cid, out); + + let cid_string = cid.to_string(); + assert_eq!( + cid_string, + "bafkreieq5jui4j25lacwomsqgjeswwl3y5zcdrresptwgmfylxo2depppq" + ); + println!("{}", cid_string); +} +``` + +Your `Cargo.toml` needs these dependencies: + +```toml +[dependencies] +cid = "0.7.0" +``` + +You can also run this example from this checkout with `cargo run --example readme`. + +## Testing + +You can run the tests using this command: `cargo test --all-features` + +You can run the tests for `no_std` using this command: `cargo test --no-default-features` + +## Maintainers + +Captain: [@dignifiedquire](https://github.com/dignifiedquire). + +## Contribute + +Contributions welcome. Please check out [the issues](https://github.com/multiformats/rust-cid/issues). + +Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +[MIT](LICENSE) © 2017 Friedel Ziegelmayer diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/RELEASE.md b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/RELEASE.md similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/cid-0.10.1/RELEASE.md rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/RELEASE.md diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/arb.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/arb.rs new file mode 100644 index 000000000000..1562a1ff88b7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/arb.rs @@ -0,0 +1,118 @@ +#![cfg(feature = "arb")] + +use std::convert::TryFrom; + +use multihash::Multihash; +use quickcheck::Gen; +use rand::{ + distributions::{weighted::WeightedIndex, Distribution}, + Rng, +}; + +use arbitrary::{size_hint, Unstructured}; +use rand::SeedableRng; + +use crate::cid::SHA2_256; +use crate::{CidGeneric, Version}; + +impl quickcheck::Arbitrary for Version { + fn arbitrary(g: &mut Gen) -> Self { + let version = u64::from(bool::arbitrary(g)); + Version::try_from(version).unwrap() + } +} + +impl quickcheck::Arbitrary for CidGeneric { + fn arbitrary(g: &mut Gen) -> Self { + if S >= 32 && Version::arbitrary(g) == Version::V0 { + let data = std::array::from_fn::<_, 32, _>(|_| u8::arbitrary(g)); + + CidGeneric::new_v0( + Multihash::wrap(SHA2_256, &data).expect("S is guaranteed to be > 32"), + ) + .expect("sha2_256 is a valid hash for cid v0") + } else { + // In real world lower IPLD Codec codes more likely to happen, hence distribute them + // with bias towards smaller values. 
+ let weights = [128, 32, 4, 4, 2, 2, 1, 1]; + let dist = WeightedIndex::new(weights.iter()).unwrap(); + let mut rng = rand::rngs::SmallRng::seed_from_u64(u64::arbitrary(g)); + let codec = match dist.sample(&mut rng) { + 0 => rng.gen_range(0..u64::pow(2, 7)), + 1 => rng.gen_range(u64::pow(2, 7)..u64::pow(2, 14)), + 2 => rng.gen_range(u64::pow(2, 14)..u64::pow(2, 21)), + 3 => rng.gen_range(u64::pow(2, 21)..u64::pow(2, 28)), + 4 => rng.gen_range(u64::pow(2, 28)..u64::pow(2, 35)), + 5 => rng.gen_range(u64::pow(2, 35)..u64::pow(2, 42)), + 6 => rng.gen_range(u64::pow(2, 42)..u64::pow(2, 49)), + 7 => rng.gen_range(u64::pow(2, 56)..u64::pow(2, 63)), + _ => unreachable!(), + }; + + let hash: Multihash = quickcheck::Arbitrary::arbitrary(g); + CidGeneric::new_v1(codec, hash) + } + } +} + +impl<'a, const S: usize> arbitrary::Arbitrary<'a> for CidGeneric { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + if S >= 32 && u.ratio(1, 10)? { + let mh = Multihash::wrap(SHA2_256, u.bytes(32)?).unwrap(); + return Ok(CidGeneric::new_v0(mh).expect("32 bytes is correct for v0")); + } + + let mut codec = 0u64; + let mut len_choice = u.arbitrary::()? | 1; + + while len_choice & 1 == 1 { + len_choice >>= 1; + + let x = u.arbitrary::(); + let next = codec + .checked_shl(8) + .zip(x.ok()) + .map(|(next, x)| next.saturating_add(x as u64)); + + match next { + None => break, + Some(next) => codec = next, + } + } + + Ok(CidGeneric::new_v1(codec, u.arbitrary()?)) + } + + fn size_hint(depth: usize) -> (usize, Option) { + let v1 = size_hint::and_all(&[ + <[u8; 2]>::size_hint(depth), + (0, Some(8)), + as arbitrary::Arbitrary>::size_hint(depth), + ]); + if S >= 32 { + size_hint::and(::size_hint(depth), size_hint::or((32, Some(32)), v1)) + } else { + v1 + } + } +} + +#[cfg(test)] +mod tests { + use crate::CidGeneric; + use arbitrary::{Arbitrary, Unstructured}; + use multihash::Multihash; + + #[test] + fn arbitrary() { + let mut u = Unstructured::new(&[ + 1, 22, 41, 13, 5, 6, 7, 8, 9, 6, 10, 243, 43, 231, 123, 43, 153, 127, 67, 76, 24, 91, + 23, 32, 32, 23, 65, 98, 193, 108, 3, + ]); + let c = as Arbitrary>::arbitrary(&mut u).unwrap(); + let c2 = CidGeneric::<16>::new_v1(22, Multihash::wrap(13, &[6, 7, 8, 9, 6]).unwrap()); + assert_eq!(c.hash(), c2.hash()); + assert_eq!(c.codec(), c2.codec()); + assert_eq!(c, c2) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/cid.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/cid.rs new file mode 100644 index 000000000000..70539a703569 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/cid.rs @@ -0,0 +1,427 @@ +//! This module contains the main CID type. +//! +//! If you are an application developer you likely won't use the `Cid` which is generic over the +//! digest size. Intead you would use the concrete top-level `Cid` type. +//! +//! As a library author that works with CIDs that should support hashes of anysize, you would +//! import the `Cid` type from this module. 
+use core::convert::TryFrom; + +#[cfg(feature = "alloc")] +use multibase::{encode as base_encode, Base}; + +use multihash::Multihash; +use unsigned_varint::encode as varint_encode; + +#[cfg(feature = "alloc")] +extern crate alloc; + +#[cfg(feature = "alloc")] +use alloc::{ + borrow, + string::{String, ToString}, + vec::Vec, +}; + +#[cfg(feature = "std")] +pub(crate) use unsigned_varint::io::read_u64 as varint_read_u64; + +/// Reads 64 bits from a byte array into a u64 +/// Adapted from unsigned-varint's generated read_u64 function at +/// https://github.com/paritytech/unsigned-varint/blob/master/src/io.rs +#[cfg(not(feature = "std"))] +pub(crate) fn varint_read_u64(mut r: R) -> Result { + use unsigned_varint::decode; + let mut b = varint_encode::u64_buffer(); + for i in 0..b.len() { + let n = r.read(&mut (b[i..i + 1]))?; + if n == 0 { + return Err(Error::VarIntDecodeError); + } else if decode::is_last(b[i]) { + match decode::u64(&b[..=i]) { + Ok((value, _)) => return Ok(value), + Err(_) => return Err(Error::VarIntDecodeError), + } + } + } + Err(Error::VarIntDecodeError) +} + +#[cfg(feature = "std")] +use std::io; + +#[cfg(not(feature = "std"))] +use core2::io; + +use crate::error::{Error, Result}; +use crate::version::Version; + +/// DAG-PB multicodec code +const DAG_PB: u64 = 0x70; +/// The SHA_256 multicodec code +pub(crate) const SHA2_256: u64 = 0x12; + +/// Representation of a CID. +/// +/// The generic is about the allocated size of the multihash. +#[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "scale-codec", derive(parity_scale_codec::Decode))] +#[cfg_attr(feature = "scale-codec", derive(parity_scale_codec::Encode))] +pub struct Cid { + /// The version of CID. + version: Version, + /// The codec of CID. + codec: u64, + /// The multihash of CID. + hash: Multihash, +} + +impl Cid { + /// Create a new CIDv0. + pub const fn new_v0(hash: Multihash) -> Result { + if hash.code() != SHA2_256 || hash.size() != 32 { + return Err(Error::InvalidCidV0Multihash); + } + Ok(Self { + version: Version::V0, + codec: DAG_PB, + hash, + }) + } + + /// Create a new CIDv1. + pub const fn new_v1(codec: u64, hash: Multihash) -> Self { + Self { + version: Version::V1, + codec, + hash, + } + } + + /// Create a new CID. + pub const fn new(version: Version, codec: u64, hash: Multihash) -> Result { + match version { + Version::V0 => { + if codec != DAG_PB { + return Err(Error::InvalidCidV0Codec); + } + Self::new_v0(hash) + } + Version::V1 => Ok(Self::new_v1(codec, hash)), + } + } + + /// Convert a CIDv0 to a CIDv1. Returns unchanged if already a CIDv1. + pub fn into_v1(self) -> Result { + match self.version { + Version::V0 => { + if self.codec != DAG_PB { + return Err(Error::InvalidCidV0Codec); + } + Ok(Self::new_v1(self.codec, self.hash)) + } + Version::V1 => Ok(self), + } + } + + /// Returns the cid version. + pub const fn version(&self) -> Version { + self.version + } + + /// Returns the cid codec. + pub const fn codec(&self) -> u64 { + self.codec + } + + /// Returns the cid multihash. + pub const fn hash(&self) -> &Multihash { + &self.hash + } + + /// Reads the bytes from a byte stream. 
+ pub fn read_bytes(mut r: R) -> Result { + let version = varint_read_u64(&mut r)?; + let codec = varint_read_u64(&mut r)?; + + // CIDv0 has the fixed `0x12 0x20` prefix + if [version, codec] == [0x12, 0x20] { + let mut digest = [0u8; 32]; + r.read_exact(&mut digest)?; + let mh = Multihash::wrap(version, &digest).expect("Digest is always 32 bytes."); + return Self::new_v0(mh); + } + + let version = Version::try_from(version)?; + match version { + Version::V0 => Err(Error::InvalidExplicitCidV0), + Version::V1 => { + let mh = Multihash::read(r)?; + Self::new(version, codec, mh) + } + } + } + + fn write_bytes_v1(&self, mut w: W) -> Result { + let mut version_buf = varint_encode::u64_buffer(); + let version = varint_encode::u64(self.version.into(), &mut version_buf); + + let mut codec_buf = varint_encode::u64_buffer(); + let codec = varint_encode::u64(self.codec, &mut codec_buf); + + let mut written = version.len() + codec.len(); + + w.write_all(version)?; + w.write_all(codec)?; + written += self.hash.write(&mut w)?; + + Ok(written) + } + + /// Writes the bytes to a byte stream, returns the number of bytes written. + pub fn write_bytes(&self, w: W) -> Result { + let written = match self.version { + Version::V0 => self.hash.write(w)?, + Version::V1 => self.write_bytes_v1(w)?, + }; + Ok(written) + } + + /// Returns the length in bytes needed to encode this cid into bytes. + pub fn encoded_len(&self) -> usize { + match self.version { + Version::V0 => self.hash.encoded_len(), + Version::V1 => { + let mut version_buf = varint_encode::u64_buffer(); + let version = varint_encode::u64(self.version.into(), &mut version_buf); + + let mut codec_buf = varint_encode::u64_buffer(); + let codec = varint_encode::u64(self.codec, &mut codec_buf); + + version.len() + codec.len() + self.hash.encoded_len() + } + } + } + + /// Returns the encoded bytes of the `Cid`. 
+ #[cfg(feature = "alloc")] + pub fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + let written = self.write_bytes(&mut bytes).unwrap(); + debug_assert_eq!(written, bytes.len()); + bytes + } + + #[cfg(feature = "alloc")] + #[allow(clippy::wrong_self_convention)] + fn to_string_v0(&self) -> String { + Base::Base58Btc.encode(self.hash.to_bytes()) + } + + #[cfg(feature = "alloc")] + #[allow(clippy::wrong_self_convention)] + fn to_string_v1(&self) -> String { + multibase::encode(Base::Base32Lower, self.to_bytes().as_slice()) + } + + /// Convert CID into a multibase encoded string + /// + /// # Example + /// + /// ``` + /// use cid::Cid; + /// use multibase::Base; + /// use multihash_codetable::{Code, MultihashDigest}; + /// + /// const RAW: u64 = 0x55; + /// + /// let cid = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); + /// let encoded = cid.to_string_of_base(Base::Base64).unwrap(); + /// assert_eq!(encoded, "mAVUSICwmtGto/8aP+ZtFPB0wQTQTQi1wZIO/oPmKXohiZueu"); + /// ``` + #[cfg(feature = "alloc")] + pub fn to_string_of_base(&self, base: Base) -> Result { + match self.version { + Version::V0 => { + if base == Base::Base58Btc { + Ok(self.to_string_v0()) + } else { + Err(Error::InvalidCidV0Base) + } + } + Version::V1 => Ok(base_encode(base, self.to_bytes())), + } + } +} + +impl Default for Cid { + fn default() -> Self { + Self { + version: Version::V1, + codec: 0, + hash: Multihash::::default(), + } + } +} + +// TODO: remove the dependency on alloc by fixing +// https://github.com/multiformats/rust-multibase/issues/33 +#[cfg(feature = "alloc")] +impl core::fmt::Display for Cid { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + let output = match self.version { + Version::V0 => self.to_string_v0(), + Version::V1 => self.to_string_v1(), + }; + write!(f, "{}", output) + } +} + +#[cfg(feature = "alloc")] +impl core::fmt::Debug for Cid { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + if f.alternate() { + f.debug_struct("Cid") + .field("version", &self.version()) + .field("codec", &self.codec()) + .field("hash", self.hash()) + .finish() + } else { + let output = match self.version { + Version::V0 => self.to_string_v0(), + Version::V1 => self.to_string_v1(), + }; + write!(f, "Cid({})", output) + } + } +} + +#[cfg(feature = "alloc")] +impl core::str::FromStr for Cid { + type Err = Error; + + fn from_str(cid_str: &str) -> Result { + Self::try_from(cid_str) + } +} + +#[cfg(feature = "alloc")] +impl TryFrom for Cid { + type Error = Error; + + fn try_from(cid_str: String) -> Result { + Self::try_from(cid_str.as_str()) + } +} + +#[cfg(feature = "alloc")] +impl TryFrom<&str> for Cid { + type Error = Error; + + fn try_from(cid_str: &str) -> Result { + static IPFS_DELIMETER: &str = "/ipfs/"; + + let hash = match cid_str.find(IPFS_DELIMETER) { + Some(index) => &cid_str[index + IPFS_DELIMETER.len()..], + _ => cid_str, + }; + + if hash.len() < 2 { + return Err(Error::InputTooShort); + } + + let decoded = if Version::is_v0_str(hash) { + Base::Base58Btc.decode(hash)? 
+ } else { + let (_, decoded) = multibase::decode(hash)?; + decoded + }; + + Self::try_from(decoded) + } +} + +#[cfg(feature = "alloc")] +impl TryFrom> for Cid { + type Error = Error; + + fn try_from(bytes: Vec) -> Result { + Self::try_from(bytes.as_slice()) + } +} + +impl TryFrom<&[u8]> for Cid { + type Error = Error; + + fn try_from(mut bytes: &[u8]) -> Result { + Self::read_bytes(&mut bytes) + } +} + +impl From<&Cid> for Cid { + fn from(cid: &Cid) -> Self { + *cid + } +} + +#[cfg(feature = "alloc")] +impl From> for Vec { + fn from(cid: Cid) -> Self { + cid.to_bytes() + } +} + +#[cfg(feature = "alloc")] +impl From> for String { + fn from(cid: Cid) -> Self { + cid.to_string() + } +} + +#[cfg(feature = "alloc")] +impl<'a, const S: usize> From> for borrow::Cow<'a, Cid> { + fn from(from: Cid) -> Self { + borrow::Cow::Owned(from) + } +} + +#[cfg(feature = "alloc")] +impl<'a, const S: usize> From<&'a Cid> for borrow::Cow<'a, Cid> { + fn from(from: &'a Cid) -> Self { + borrow::Cow::Borrowed(from) + } +} + +#[cfg(test)] +mod tests { + #[test] + #[cfg(feature = "scale-codec")] + fn test_cid_scale_codec() { + use super::Cid; + use parity_scale_codec::{Decode, Encode}; + + let cid = Cid::<64>::default(); + let bytes = cid.encode(); + let cid2 = Cid::decode(&mut &bytes[..]).unwrap(); + assert_eq!(cid, cid2); + } + + #[test] + #[cfg(feature = "std")] + fn test_debug_instance() { + use super::Cid; + use std::str::FromStr; + let cid = + Cid::<64>::from_str("bafyreibjo4xmgaevkgud7mbifn3dzp4v4lyaui4yvqp3f2bqwtxcjrdqg4") + .unwrap(); + // short debug + assert_eq!( + &format!("{:?}", cid), + "Cid(bafyreibjo4xmgaevkgud7mbifn3dzp4v4lyaui4yvqp3f2bqwtxcjrdqg4)" + ); + // verbose debug + let mut txt = format!("{:#?}", cid); + txt.retain(|c| !c.is_whitespace()); + assert_eq!(&txt, "Cid{version:V1,codec:113,hash:Multihash{code:18,size:32,digest:[41,119,46,195,0,149,81,168,63,176,40,43,118,60,191,149,226,240,10,35,152,172,31,178,232,48,180,238,36,196,112,55,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,],},}"); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/error.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/error.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/error.rs rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/error.rs diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/lib.rs new file mode 100644 index 000000000000..709eea2aec3f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/lib.rs @@ -0,0 +1,35 @@ +//! # cid +//! +//! Implementation of [cid](https://github.com/ipld/cid) in Rust. + +#![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +mod cid; +mod error; +mod version; + +#[cfg(any(test, feature = "arb"))] +mod arb; +#[cfg(feature = "serde")] +pub mod serde; + +pub use self::cid::Cid as CidGeneric; +pub use self::error::{Error, Result}; +pub use self::version::Version; + +#[cfg(feature = "alloc")] +pub use multibase; +pub use multihash; + +// Doctest the readme! +#[doc = include_str!("../README.md")] +#[cfg(all(doctest, feature = "std"))] +pub struct ReadmeDoctest; + +/// A Cid that contains a multihash with an allocated size of 512 bits. +/// +/// This is the same digest size the default multihash code table has. +/// +/// If you need a CID that is generic over its digest size, use [`CidGeneric`] instead. 
+pub type Cid = CidGeneric<64>; diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/serde.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/serde.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/serde.rs rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/serde.rs diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/version.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/version.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/cid-0.10.1/src/version.rs rename to third_party/rust/chromium_crates_io/vendor/cid-0.11.1/src/version.rs diff --git a/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/tests/lib.rs b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/tests/lib.rs new file mode 100644 index 000000000000..212aa62ad22b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/cid-0.11.1/tests/lib.rs @@ -0,0 +1,227 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "std")] +mod std_tests { + use std::collections::HashMap; + use std::convert::{TryFrom, TryInto}; + use std::str::FromStr; + + use cid::{Cid, CidGeneric, Error, Version}; + use multibase::Base; + use multihash_codetable::{Code, MultihashDigest}; + + const RAW: u64 = 0x55; + const DAG_PB: u64 = 0x70; + + #[test] + fn basic_marshalling() { + let h = Code::Sha2_256.digest(b"beep boop"); + + let cid = Cid::new_v1(DAG_PB, h); + + let data = cid.to_bytes(); + let out = Cid::try_from(data.clone()).unwrap(); + assert_eq!(cid, out); + + let out2 = data.try_into().unwrap(); + assert_eq!(cid, out2); + + let s = cid.to_string(); + let out3 = Cid::try_from(&s[..]).unwrap(); + assert_eq!(cid, out3); + + let out4 = (&s[..]).try_into().unwrap(); + assert_eq!(cid, out4); + } + + #[test] + fn empty_string() { + assert!(matches!(Cid::try_from(""), Err(Error::InputTooShort))) + } + + #[test] + fn v0_handling() { + let old = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"; + let cid = Cid::try_from(old).unwrap(); + + assert_eq!(cid.version(), Version::V0); + assert_eq!(cid.to_string(), old); + } + + #[test] + fn from_str() { + let cid: Cid = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" + .parse() + .unwrap(); + assert_eq!(cid.version(), Version::V0); + + let bad = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zIII".parse::(); + assert!(matches!(bad, Err(Error::ParsingError))); + } + + #[test] + fn v0_error() { + let bad = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zIII"; + assert!(matches!(Cid::try_from(bad), Err(Error::ParsingError))); + } + + #[test] + fn from() { + let the_hash = "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"; + + let cases = vec![ + format!("/ipfs/{:}", &the_hash), + format!("https://ipfs.io/ipfs/{:}", &the_hash), + format!("http://localhost:8080/ipfs/{:}", &the_hash), + ]; + + for case in cases { + let cid = Cid::try_from(case).unwrap(); + assert_eq!(cid.version(), Version::V0); + assert_eq!(cid.to_string(), the_hash); + } + } + + #[test] + fn test_hash() { + let data: Vec = vec![1, 2, 3]; + let hash = Code::Sha2_256.digest(&data); + let mut map = HashMap::new(); + let cid = Cid::new_v0(hash).unwrap(); + map.insert(cid, data.clone()); + assert_eq!(&data, map.get(&cid).unwrap()); + } + + #[test] + fn test_base32() { + let cid = + Cid::from_str("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap(); + assert_eq!(cid.version(), Version::V1); + assert_eq!(cid.codec(), RAW); + assert_eq!(cid.hash(), 
&Code::Sha2_256.digest(b"foo")); + } + + #[test] + fn to_string() { + let expected_cid = "bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy"; + let cid = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); + assert_eq!(cid.to_string(), expected_cid); + } + + #[test] + fn to_string_of_base32() { + let expected_cid = "bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy"; + let cid = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); + assert_eq!( + cid.to_string_of_base(Base::Base32Lower).unwrap(), + expected_cid + ); + } + + #[test] + fn to_string_of_base64() { + let expected_cid = "mAVUSICwmtGto/8aP+ZtFPB0wQTQTQi1wZIO/oPmKXohiZueu"; + let cid = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); + assert_eq!(cid.to_string_of_base(Base::Base64).unwrap(), expected_cid); + } + + #[test] + fn to_string_of_base58_v0() { + let expected_cid = "QmRJzsvyCQyizr73Gmms8ZRtvNxmgqumxc2KUp71dfEmoj"; + let cid = Cid::new_v0(Code::Sha2_256.digest(b"foo")).unwrap(); + assert_eq!( + cid.to_string_of_base(Base::Base58Btc).unwrap(), + expected_cid + ); + } + + #[test] + fn to_string_of_base_v0_error() { + let cid = Cid::new_v0(Code::Sha2_256.digest(b"foo")).unwrap(); + assert!(matches!( + cid.to_string_of_base(Base::Base16Upper), + Err(Error::InvalidCidV0Base) + )); + } + + #[test] + fn explicit_v0_is_disallowed() { + use std::io::Cursor; + assert!(matches!( + Cid::read_bytes(Cursor::new([ + 0x00, 0x70, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12 + ])), + Err(Error::InvalidExplicitCidV0) + )); + } + + #[test] + fn new_v0_accepts_only_32_bytes() { + use multihash::Multihash; + const SHA2_256: u64 = 0x12; + + for i in 1..64 { + if i == 32 { + continue; + } + assert!(matches!( + Cid::new_v0(Multihash::wrap(SHA2_256, &vec![7; i]).unwrap()), + Err(Error::InvalidCidV0Multihash) + )); + } + } + + fn a_function_that_takes_a_generic_cid(cid: &CidGeneric) -> String { + cid.to_string() + } + + // This test is about having something implemented that used the default size of `Cid`. So the code + // is using `Cid` instead of `Cid`. The code will still work with other sizes. + #[test] + fn method_can_take_differently_sized_cids() { + #[derive(Clone, Copy, Debug, Eq, PartialEq, MultihashDigest)] + #[mh(alloc_size = 128)] + enum Code128 { + #[mh(code = 0x12, hasher = multihash_codetable::Sha2_256)] + Sha2_256, + } + + let cid_default = Cid::new_v1(RAW, Code::Sha2_256.digest(b"foo")); + let cid_128 = CidGeneric::<128>::new_v1(RAW, Code128::Sha2_256.digest(b"foo")); + + assert_eq!( + a_function_that_takes_a_generic_cid(&cid_default), + a_function_that_takes_a_generic_cid(&cid_128) + ); + } + + #[test] + fn test_into_v1() { + let cid = Cid::from_str("QmTPcW343HGMdoxarwvHHoPhkbo5GfNYjnZkyW5DBtpvLe").unwrap(); + let cid_v1 = cid.into_v1().unwrap(); + assert_eq!(cid_v1.version(), Version::V1); + assert_eq!( + cid_v1.to_string(), + "bafybeiclbsxcvqpfliqcejqz5ghpvw4r7vktjkyk3ruvjvdmam5azct2v4" + ); + + let cid = + Cid::from_str("bafyreibjo4xmgaevkgud7mbifn3dzp4v4lyaui4yvqp3f2bqwtxcjrdqg4").unwrap(); + let cid_v1 = cid.into_v1().unwrap(); + assert_eq!(cid_v1.version(), Version::V1); + assert_eq!(cid_v1, cid); + } +} + +#[cfg(all(test, not(feature = "std")))] +mod no_std_tests { + use cid::Cid; + + #[test] + fn validate_cid_unwrap_panics() { + // This should not panic, but should return an error. 
+ // bad_cid created during fuzz testing + let bad_cid = [255, 255, 255, 255, 0, 6, 85, 0]; + assert!(Cid::read_bytes(&bad_cid[..]).is_err()); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/.cargo_vcs_info.json new file mode 100644 index 000000000000..35909123a317 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/.cargo_vcs_info.json @@ -0,0 +1,5 @@ +{ + "git": { + "sha1": "cfbfca49ec674db3c8c3670427afd64cf3472895" + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/CHANGELOG.md new file mode 100644 index 000000000000..d63daa7b7797 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/CHANGELOG.md @@ -0,0 +1,80 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 0.11.0 (2021-04-28) +### Changed +- Bump `cipher` dependency to v0.3 ([#621]) + +[#621]: https://github.com/RustCrypto/traits/pull/621 + +## 0.10.0 (2020-10-15) +### Changed +- Replace `block-cipher` crate with new `cipher` crate ([#337], [#338]) + +[#338]: https://github.com/RustCrypto/traits/pull/338 +[#337]: https://github.com/RustCrypto/traits/pull/337 + +## 0.9.1 (2020-08-12) +### Added +- Re-export the `block-cipher` crate ([#257]) + +[#257]: https://github.com/RustCrypto/traits/pull/257 + +## 0.9.0 (2020-08-10) +### Added +- `FromBlockCipher` trait and blanket implementation of the `NewMac` trait +for it ([#217]) + +### Changed +- Updated test vectors storage to `blobby v0.3` ([#217]) + +### Removed +- `impl_write!` macro ([#217]) + +[#217]: https://github.com/RustCrypto/traits/pull/217 + +## 0.8.0 (2020-06-04) +### Added +- `impl_write!` macro ([#134]) + +### Changed +- Bump `generic-array` dependency to v0.14 ([#144]) +- Split `Mac` initialization into `NewMac` trait ([#133]) +- Rename `MacResult` => `Output`, `code` => `into_bytes` ([#114]) +- Rename `Input::input` to `Update::update` ([#111]) +- Update to 2018 edition ([#108]) +- Bump `subtle` dependency from v1.0 to v2.0 ([#33]) + +[#144]: https://github.com/RustCrypto/traits/pull/95 +[#134]: https://github.com/RustCrypto/traits/pull/134 +[#133]: https://github.com/RustCrypto/traits/pull/133 +[#114]: https://github.com/RustCrypto/traits/pull/114 +[#111]: https://github.com/RustCrypto/traits/pull/111 +[#108]: https://github.com/RustCrypto/traits/pull/108 +[#33]: https://github.com/RustCrypto/traits/pull/33 + +## 0.7.0 (2018-10-01) + +## 0.6.2 (2018-06-21) + +## 0.6.1 (2018-06-20) + +## 0.6.0 (2017-11-26) + +## 0.5.2 (2017-11-20) + +## 0.5.1 (2017-11-15) + +## 0.5.0 (2017-11-14) + +## 0.4.0 (2017-06-12) + +## 0.3.0 (2017-05-14) + +## 0.2.0 (2017-05-14) + +## 0.1.0 (2016-10-14) diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/Cargo.toml 
b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/Cargo.toml new file mode 100644 index 000000000000..7f3402b7d9af --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/Cargo.toml @@ -0,0 +1,45 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "crypto-mac" +version = "0.11.0" +authors = ["RustCrypto Developers"] +description = "Trait for Message Authentication Code (MAC) algorithms" +documentation = "https://docs.rs/crypto-mac" +readme = "README.md" +keywords = ["crypto", "mac"] +categories = ["cryptography", "no-std"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/RustCrypto/traits" +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] +[dependencies.blobby] +version = "0.3" +optional = true + +[dependencies.cipher] +version = "0.3" +optional = true + +[dependencies.generic-array] +version = "0.14" + +[dependencies.subtle] +version = "2" +default-features = false + +[features] +dev = ["blobby"] +std = [] diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/Cargo.toml.orig new file mode 100644 index 000000000000..eaa2837ac413 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/Cargo.toml.orig @@ -0,0 +1,26 @@ +[package] +name = "crypto-mac" +description = "Trait for Message Authentication Code (MAC) algorithms" +version = "0.11.0" +authors = ["RustCrypto Developers"] +license = "MIT OR Apache-2.0" +readme = "README.md" +edition = "2018" +documentation = "https://docs.rs/crypto-mac" +repository = "https://github.com/RustCrypto/traits" +keywords = ["crypto", "mac"] +categories = ["cryptography", "no-std"] + +[dependencies] +generic-array = "0.14" +cipher = { version = "0.3", optional = true, path = "../cipher" } +subtle = { version = "2", default-features = false } +blobby = { version = "0.3", optional = true } + +[features] +dev = ["blobby"] +std = [] + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/LICENSE-APACHE similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/LICENSE-APACHE rename to third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/LICENSE-APACHE diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/LICENSE-MIT similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/LICENSE-MIT rename to third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/LICENSE-MIT diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/README.md b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/README.md similarity index 100% rename from 
third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/README.md rename to third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/README.md diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/src/dev.rs b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/src/dev.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/src/dev.rs rename to third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/src/dev.rs diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/src/errors.rs b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/src/errors.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/src/errors.rs rename to third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/src/errors.rs diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/src/lib.rs new file mode 100644 index 000000000000..1f101f157e7b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.0/src/lib.rs @@ -0,0 +1,159 @@ +//! This crate provides trait for Message Authentication Code (MAC) algorithms. + +#![no_std] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg", + html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg" +)] +#![forbid(unsafe_code)] +#![warn(missing_docs, rust_2018_idioms)] + +#[cfg(feature = "std")] +extern crate std; + +#[cfg(feature = "cipher")] +pub use cipher; +#[cfg(feature = "cipher")] +use cipher::{BlockCipher, NewBlockCipher}; + +#[cfg(feature = "dev")] +#[cfg_attr(docsrs, doc(cfg(feature = "dev")))] +pub mod dev; + +mod errors; + +pub use crate::errors::{InvalidKeyLength, MacError}; +pub use generic_array::{self, typenum::consts}; + +use generic_array::typenum::Unsigned; +use generic_array::{ArrayLength, GenericArray}; +use subtle::{Choice, ConstantTimeEq}; + +/// Key for an algorithm that implements [`NewMac`]. +pub type Key = GenericArray::KeySize>; + +/// Instantiate a [`Mac`] algorithm. +pub trait NewMac: Sized { + /// Key size in bytes with which cipher guaranteed to be initialized. + type KeySize: ArrayLength; + + /// Initialize new MAC instance from key with fixed size. + fn new(key: &Key) -> Self; + + /// Initialize new MAC instance from key with variable size. + /// + /// Default implementation will accept only keys with length equal to + /// `KeySize`, but some MACs can accept range of key lengths. + fn new_from_slice(key: &[u8]) -> Result { + if key.len() != Self::KeySize::to_usize() { + Err(InvalidKeyLength) + } else { + Ok(Self::new(GenericArray::from_slice(key))) + } + } +} + +/// The [`Mac`] trait defines methods for a Message Authentication algorithm. +pub trait Mac: Clone { + /// Output size of the [[`Mac`]] + type OutputSize: ArrayLength; + + /// Update MAC state with the given data. + fn update(&mut self, data: &[u8]); + + /// Reset [`Mac`] instance. + fn reset(&mut self); + + /// Obtain the result of a [`Mac`] computation as a [`Output`] and consume + /// [`Mac`] instance. + fn finalize(self) -> Output; + + /// Obtain the result of a [`Mac`] computation as a [`Output`] and reset + /// [`Mac`] instance. + fn finalize_reset(&mut self) -> Output { + let res = self.clone().finalize(); + self.reset(); + res + } + + /// Check if tag/code value is correct for the processed input. 
+ fn verify(self, tag: &[u8]) -> Result<(), MacError> { + let choice = self.finalize().bytes.ct_eq(tag); + + if choice.unwrap_u8() == 1 { + Ok(()) + } else { + Err(MacError) + } + } +} + +/// [`Output`] is a thin wrapper around bytes array which provides a safe `Eq` +/// implementation that runs in a fixed time. +#[derive(Clone)] +pub struct Output { + bytes: GenericArray, +} + +impl Output { + /// Create a new MAC [`Output`]. + pub fn new(bytes: GenericArray) -> Output { + Output { bytes } + } + + /// Get the MAC tag/code value as a byte array. + /// + /// Be very careful using this method, since incorrect use of the tag value + /// may permit timing attacks which defeat the security provided by the + /// [`Mac`] trait. + pub fn into_bytes(self) -> GenericArray { + self.bytes + } +} + +impl ConstantTimeEq for Output { + fn ct_eq(&self, other: &Self) -> Choice { + self.bytes.ct_eq(&other.bytes) + } +} + +impl PartialEq for Output { + fn eq(&self, x: &Output) -> bool { + self.ct_eq(x).unwrap_u8() == 1 + } +} + +impl Eq for Output {} + +#[cfg(feature = "cipher")] +#[cfg_attr(docsrs, doc(cfg(feature = "cipher")))] +/// Trait for MAC functions which can be created from block cipher. +pub trait FromBlockCipher { + /// Block cipher type + type Cipher: BlockCipher; + + /// Create new MAC isntance from provided block cipher. + fn from_cipher(cipher: Self::Cipher) -> Self; +} + +#[cfg(feature = "cipher")] +#[cfg_attr(docsrs, doc(cfg(feature = "cipher")))] +impl NewMac for T +where + T: FromBlockCipher, + T::Cipher: NewBlockCipher, +{ + type KeySize = <::Cipher as NewBlockCipher>::KeySize; + + fn new(key: &Key) -> Self { + let cipher = ::Cipher::new(key); + Self::from_cipher(cipher) + } + + fn new_from_slice(key: &[u8]) -> Result { + ::Cipher::new_from_slice(key) + .map_err(|_| InvalidKeyLength) + .map(Self::from_cipher) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/.cargo_vcs_info.json deleted file mode 100644 index 08d894d59103..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/.cargo_vcs_info.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "git": { - "sha1": "95e57639399c193e22b5ea54c02f74bd7b6a9b6c" - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/CHANGELOG.md deleted file mode 100644 index 32b5465b734d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/CHANGELOG.md +++ /dev/null @@ -1,92 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
- -## 0.11.1 (2021-07-20) -### Changed -- Pin `subtle` dependency to v2.4 ([#691]) - -[#691]: https://github.com/RustCrypto/traits/pull/691 - -## 0.11.0 (2021-04-28) -### Changed -- Bump `cipher` dependency to v0.3 ([#621]) - -[#621]: https://github.com/RustCrypto/traits/pull/621 - -## 0.10.1 (2021-07-20) -### Changed -- Pin `subtle` dependency to v2.4 ([#690]) - -[#690]: https://github.com/RustCrypto/traits/pull/690 - -## 0.10.0 (2020-10-15) -### Changed -- Replace `block-cipher` crate with new `cipher` crate ([#337], [#338]) - -[#338]: https://github.com/RustCrypto/traits/pull/338 -[#337]: https://github.com/RustCrypto/traits/pull/337 - -## 0.9.1 (2020-08-12) -### Added -- Re-export the `block-cipher` crate ([#257]) - -[#257]: https://github.com/RustCrypto/traits/pull/257 - -## 0.9.0 (2020-08-10) -### Added -- `FromBlockCipher` trait and blanket implementation of the `NewMac` trait -for it ([#217]) - -### Changed -- Updated test vectors storage to `blobby v0.3` ([#217]) - -### Removed -- `impl_write!` macro ([#217]) - -[#217]: https://github.com/RustCrypto/traits/pull/217 - -## 0.8.0 (2020-06-04) -### Added -- `impl_write!` macro ([#134]) - -### Changed -- Bump `generic-array` dependency to v0.14 ([#144]) -- Split `Mac` initialization into `NewMac` trait ([#133]) -- Rename `MacResult` => `Output`, `code` => `into_bytes` ([#114]) -- Rename `Input::input` to `Update::update` ([#111]) -- Update to 2018 edition ([#108]) -- Bump `subtle` dependency from v1.0 to v2.0 ([#33]) - -[#144]: https://github.com/RustCrypto/traits/pull/95 -[#134]: https://github.com/RustCrypto/traits/pull/134 -[#133]: https://github.com/RustCrypto/traits/pull/133 -[#114]: https://github.com/RustCrypto/traits/pull/114 -[#111]: https://github.com/RustCrypto/traits/pull/111 -[#108]: https://github.com/RustCrypto/traits/pull/108 -[#33]: https://github.com/RustCrypto/traits/pull/33 - -## 0.7.0 (2018-10-01) - -## 0.6.2 (2018-06-21) - -## 0.6.1 (2018-06-20) - -## 0.6.0 (2017-11-26) - -## 0.5.2 (2017-11-20) - -## 0.5.1 (2017-11-15) - -## 0.5.0 (2017-11-14) - -## 0.4.0 (2017-06-12) - -## 0.3.0 (2017-05-14) - -## 0.2.0 (2017-05-14) - -## 0.1.0 (2016-10-14) diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/Cargo.toml deleted file mode 100644 index 5a7a906c4dc1..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "crypto-mac" -version = "0.11.1" -authors = ["RustCrypto Developers"] -description = "Trait for Message Authentication Code (MAC) algorithms" -documentation = "https://docs.rs/crypto-mac" -readme = "README.md" -keywords = ["crypto", "mac"] -categories = ["cryptography", "no-std"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/RustCrypto/traits" -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] -[dependencies.blobby] -version = "0.3" -optional = true - -[dependencies.cipher] -version = "0.3" -optional = true - -[dependencies.generic-array] -version = "0.14" - -[dependencies.subtle] -version = "=2.4" -default-features = false - -[features] -dev = ["blobby"] -std = [] diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/Cargo.toml.orig deleted file mode 100644 index 277cb4f86029..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/Cargo.toml.orig +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "crypto-mac" -description = "Trait for Message Authentication Code (MAC) algorithms" -version = "0.11.1" # Also update html_root_url in lib.rs when bumping this -authors = ["RustCrypto Developers"] -license = "MIT OR Apache-2.0" -readme = "README.md" -edition = "2018" -documentation = "https://docs.rs/crypto-mac" -repository = "https://github.com/RustCrypto/traits" -keywords = ["crypto", "mac"] -categories = ["cryptography", "no-std"] - -[dependencies] -generic-array = "0.14" -cipher = { version = "0.3", optional = true, path = "../cipher" } -subtle = { version = "=2.4", default-features = false } -blobby = { version = "0.3", optional = true } - -[features] -dev = ["blobby"] -std = [] - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] diff --git a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/src/lib.rs deleted file mode 100644 index c605c95f1e10..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/crypto-mac-0.11.1/src/lib.rs +++ /dev/null @@ -1,160 +0,0 @@ -//! This crate provides trait for Message Authentication Code (MAC) algorithms. - -#![no_std] -#![cfg_attr(docsrs, feature(doc_cfg))] -#![doc( - html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg", - html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg", - html_root_url = "https://docs.rs/crypto-mac/0.11.1" -)] -#![forbid(unsafe_code)] -#![warn(missing_docs, rust_2018_idioms)] - -#[cfg(feature = "std")] -extern crate std; - -#[cfg(feature = "cipher")] -pub use cipher; -#[cfg(feature = "cipher")] -use cipher::{BlockCipher, NewBlockCipher}; - -#[cfg(feature = "dev")] -#[cfg_attr(docsrs, doc(cfg(feature = "dev")))] -pub mod dev; - -mod errors; - -pub use crate::errors::{InvalidKeyLength, MacError}; -pub use generic_array::{self, typenum::consts}; - -use generic_array::typenum::Unsigned; -use generic_array::{ArrayLength, GenericArray}; -use subtle::{Choice, ConstantTimeEq}; - -/// Key for an algorithm that implements [`NewMac`]. -pub type Key = GenericArray::KeySize>; - -/// Instantiate a [`Mac`] algorithm. -pub trait NewMac: Sized { - /// Key size in bytes with which cipher guaranteed to be initialized. 
- type KeySize: ArrayLength; - - /// Initialize new MAC instance from key with fixed size. - fn new(key: &Key) -> Self; - - /// Initialize new MAC instance from key with variable size. - /// - /// Default implementation will accept only keys with length equal to - /// `KeySize`, but some MACs can accept range of key lengths. - fn new_from_slice(key: &[u8]) -> Result { - if key.len() != Self::KeySize::to_usize() { - Err(InvalidKeyLength) - } else { - Ok(Self::new(GenericArray::from_slice(key))) - } - } -} - -/// The [`Mac`] trait defines methods for a Message Authentication algorithm. -pub trait Mac: Clone { - /// Output size of the [[`Mac`]] - type OutputSize: ArrayLength; - - /// Update MAC state with the given data. - fn update(&mut self, data: &[u8]); - - /// Reset [`Mac`] instance. - fn reset(&mut self); - - /// Obtain the result of a [`Mac`] computation as a [`Output`] and consume - /// [`Mac`] instance. - fn finalize(self) -> Output; - - /// Obtain the result of a [`Mac`] computation as a [`Output`] and reset - /// [`Mac`] instance. - fn finalize_reset(&mut self) -> Output { - let res = self.clone().finalize(); - self.reset(); - res - } - - /// Check if tag/code value is correct for the processed input. - fn verify(self, tag: &[u8]) -> Result<(), MacError> { - let choice = self.finalize().bytes.ct_eq(tag); - - if choice.unwrap_u8() == 1 { - Ok(()) - } else { - Err(MacError) - } - } -} - -/// [`Output`] is a thin wrapper around bytes array which provides a safe `Eq` -/// implementation that runs in a fixed time. -#[derive(Clone)] -pub struct Output { - bytes: GenericArray, -} - -impl Output { - /// Create a new MAC [`Output`]. - pub fn new(bytes: GenericArray) -> Output { - Output { bytes } - } - - /// Get the MAC tag/code value as a byte array. - /// - /// Be very careful using this method, since incorrect use of the tag value - /// may permit timing attacks which defeat the security provided by the - /// [`Mac`] trait. - pub fn into_bytes(self) -> GenericArray { - self.bytes - } -} - -impl ConstantTimeEq for Output { - fn ct_eq(&self, other: &Self) -> Choice { - self.bytes.ct_eq(&other.bytes) - } -} - -impl PartialEq for Output { - fn eq(&self, x: &Output) -> bool { - self.ct_eq(x).unwrap_u8() == 1 - } -} - -impl Eq for Output {} - -#[cfg(feature = "cipher")] -#[cfg_attr(docsrs, doc(cfg(feature = "cipher")))] -/// Trait for MAC functions which can be created from block cipher. -pub trait FromBlockCipher { - /// Block cipher type - type Cipher: BlockCipher; - - /// Create new MAC isntance from provided block cipher. 
- fn from_cipher(cipher: Self::Cipher) -> Self; -} - -#[cfg(feature = "cipher")] -#[cfg_attr(docsrs, doc(cfg(feature = "cipher")))] -impl NewMac for T -where - T: FromBlockCipher, - T::Cipher: NewBlockCipher, -{ - type KeySize = <::Cipher as NewBlockCipher>::KeySize; - - fn new(key: &Key) -> Self { - let cipher = ::Cipher::new(key); - Self::from_cipher(cipher) - } - - fn new_from_slice(key: &[u8]) -> Result { - ::Cipher::new_from_slice(key) - .map_err(|_| InvalidKeyLength) - .map(Self::from_cipher) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..51b42ffabdcb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "8a56bf0407d8eecf2165775a95a67cc0e956e50d" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/.gitignore b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/.gitignore new file mode 100644 index 000000000000..4fffb2f89cbd --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/Cargo.toml new file mode 100644 index 000000000000..925d53e88376 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/Cargo.toml @@ -0,0 +1,27 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +rust-version = "1.6" +name = "equivalent" +version = "1.0.1" +description = "Traits for key comparison in maps." +readme = "README.md" +keywords = [ + "hashmap", + "no_std", +] +categories = [ + "data-structures", + "no-std", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/cuviper/equivalent" diff --git a/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/Cargo.toml.orig new file mode 100644 index 000000000000..8fcf21a84d95 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/Cargo.toml.orig @@ -0,0 +1,9 @@ +[package] +name = "equivalent" +version = "1.0.1" +rust-version = "1.6" +license = "Apache-2.0 OR MIT" +description = "Traits for key comparison in maps." 
+repository = "https://github.com/cuviper/equivalent" +keywords = ["hashmap", "no_std"] +categories = ["data-structures", "no-std"] diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/LICENSE-APACHE similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/LICENSE-APACHE rename to third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/LICENSE-APACHE diff --git a/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/LICENSE-MIT new file mode 100644 index 000000000000..5ac40dae7f6f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016--2023 + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/README.md b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/README.md new file mode 100644 index 000000000000..8ff7e24c8ce9 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/README.md @@ -0,0 +1,25 @@ +# Equivalent + +[![crates.io](https://img.shields.io/crates/v/equivalent.svg)](https://crates.io/crates/equivalent) +[![docs](https://docs.rs/equivalent/badge.svg)](https://docs.rs/equivalent) + +`Equivalent` and `Comparable` are Rust traits for key comparison in maps. + +These may be used in the implementation of maps where the lookup type `Q` +may be different than the stored key type `K`. + +* `Q: Equivalent` checks for equality, similar to the `HashMap` + constraint `K: Borrow, Q: Eq`. +* `Q: Comparable` checks the ordering, similar to the `BTreeMap` + constraint `K: Borrow, Q: Ord`. + +These traits are not used by the maps in the standard library, but they may +add more flexibility in third-party map implementations, especially in +situations where a strict `K: Borrow` relationship is not available. + +## License + +Equivalent is distributed under the terms of both the MIT license and the +Apache License (Version 2.0). See [LICENSE-APACHE](LICENSE-APACHE) and +[LICENSE-MIT](LICENSE-MIT) for details. Opening a pull request is +assumed to signal agreement with these licensing terms. 
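As a small illustration of the key-comparison behaviour the README above describes, here is a minimal sketch (not taken from the vendored files) that exercises only the blanket `Equivalent`/`Comparable` implementations shown in the `equivalent-1.0.1` sources that follow; it assumes the `equivalent` crate is available as a dependency.

```rust
// Minimal sketch, assuming the vendored `equivalent` crate is on the
// dependency path; it uses only the blanket impls from src/lib.rs below.
use equivalent::{Comparable, Equivalent};
use std::cmp::Ordering;

fn main() {
    let stored_key = String::from("foo");

    // `str: Equivalent<String>` holds via the blanket impl
    // (`str: Eq`, `String: Borrow<str>`), so a `&str` can stand in
    // for the stored `String` key during equality checks.
    assert!("foo".equivalent(&stored_key));
    assert!(!"bar".equivalent(&stored_key));

    // Likewise `str: Comparable<String>` via `str: Ord`.
    assert_eq!("bar".compare(&stored_key), Ordering::Less);
    assert_eq!("zoo".compare(&stored_key), Ordering::Greater);
}
```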
diff --git a/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/src/lib.rs new file mode 100644 index 000000000000..09ba58dff3a8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/equivalent-1.0.1/src/lib.rs @@ -0,0 +1,113 @@ +//! [`Equivalent`] and [`Comparable`] are traits for key comparison in maps. +//! +//! These may be used in the implementation of maps where the lookup type `Q` +//! may be different than the stored key type `K`. +//! +//! * `Q: Equivalent` checks for equality, similar to the `HashMap` +//! constraint `K: Borrow, Q: Eq`. +//! * `Q: Comparable` checks the ordering, similar to the `BTreeMap` +//! constraint `K: Borrow, Q: Ord`. +//! +//! These traits are not used by the maps in the standard library, but they may +//! add more flexibility in third-party map implementations, especially in +//! situations where a strict `K: Borrow` relationship is not available. +//! +//! # Examples +//! +//! ``` +//! use equivalent::*; +//! use std::cmp::Ordering; +//! +//! pub struct Pair(pub A, pub B); +//! +//! impl<'a, A: ?Sized, B: ?Sized, C, D> Equivalent<(C, D)> for Pair<&'a A, &'a B> +//! where +//! A: Equivalent, +//! B: Equivalent, +//! { +//! fn equivalent(&self, key: &(C, D)) -> bool { +//! self.0.equivalent(&key.0) && self.1.equivalent(&key.1) +//! } +//! } +//! +//! impl<'a, A: ?Sized, B: ?Sized, C, D> Comparable<(C, D)> for Pair<&'a A, &'a B> +//! where +//! A: Comparable, +//! B: Comparable, +//! { +//! fn compare(&self, key: &(C, D)) -> Ordering { +//! match self.0.compare(&key.0) { +//! Ordering::Equal => self.1.compare(&key.1), +//! not_equal => not_equal, +//! } +//! } +//! } +//! +//! fn main() { +//! let key = (String::from("foo"), String::from("bar")); +//! let q1 = Pair("foo", "bar"); +//! let q2 = Pair("boo", "bar"); +//! let q3 = Pair("foo", "baz"); +//! +//! assert!(q1.equivalent(&key)); +//! assert!(!q2.equivalent(&key)); +//! assert!(!q3.equivalent(&key)); +//! +//! assert_eq!(q1.compare(&key), Ordering::Equal); +//! assert_eq!(q2.compare(&key), Ordering::Less); +//! assert_eq!(q3.compare(&key), Ordering::Greater); +//! } +//! ``` + +#![no_std] + +use core::borrow::Borrow; +use core::cmp::Ordering; + +/// Key equivalence trait. +/// +/// This trait allows hash table lookup to be customized. It has one blanket +/// implementation that uses the regular solution with `Borrow` and `Eq`, just +/// like `HashMap` does, so that you can pass `&str` to lookup into a map with +/// `String` keys and so on. +/// +/// # Contract +/// +/// The implementor **must** hash like `K`, if it is hashable. +pub trait Equivalent { + /// Compare self to `key` and return `true` if they are equal. + fn equivalent(&self, key: &K) -> bool; +} + +impl Equivalent for Q +where + Q: Eq, + K: Borrow, +{ + #[inline] + fn equivalent(&self, key: &K) -> bool { + PartialEq::eq(self, key.borrow()) + } +} + +/// Key ordering trait. +/// +/// This trait allows ordered map lookup to be customized. It has one blanket +/// implementation that uses the regular solution with `Borrow` and `Ord`, just +/// like `BTreeMap` does, so that you can pass `&str` to lookup into a map with +/// `String` keys and so on. +pub trait Comparable: Equivalent { + /// Compare self to `key` and return their ordering. 
+ fn compare(&self, key: &K) -> Ordering; +} + +impl Comparable for Q +where + Q: Ord, + K: Borrow, +{ + #[inline] + fn compare(&self, key: &K) -> Ordering { + Ord::cmp(self, key.borrow()) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/.cargo_vcs_info.json deleted file mode 100644 index ad16577b8342..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "c39d880d086aa2e771c7190163436e02715d80f3" - }, - "path_in_vcs": "ipld/blockstore" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/CHANGELOG.md deleted file mode 100644 index 15a9f7b681cc..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/CHANGELOG.md +++ /dev/null @@ -1,28 +0,0 @@ -# Changelog - -Changes to the FVM's Blockstore abstraction - -## [Unreleased] - -## 0.2.1 [2024-04-30] - -- Constify `Block::new`. -- Implement `PartialEq` between blocks with different underlying buffer types. -- Improve `Block` `Debug` implementation. - -## 0.2.0 [2023-06-28] - -Breaking Changes: - -- Update cid/multihash. This is a breaking change as it affects the API. - -## 0.1.2 [2023-05-03] - -- Impl blockstore for `Arc`. -- Add a `copy_to` method to the memory blockstore. - -## 0.1.1 [2022-05-16] - -Remove blake2b feature from multihash (we don't need it here). This is technically a breaking change -as downstream could be relying on this (by accident), but they shouldn't be, so we're not going to -bother with a minor version bump. diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/Cargo.toml deleted file mode 100644 index cd2a142c0e6d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -name = "fvm_ipld_blockstore" -version = "0.2.1" -authors = [ - "ChainSafe Systems ", - "Protocol Labs", - "Filecoin Core Devs", -] -description = "Sharded IPLD Blockstore." 
-license = "MIT OR Apache-2.0" -repository = "https://github.com/filecoin-project/ref-fvm" - -[dependencies.anyhow] -version = "1.0.71" - -[dependencies.cid] -version = "0.10.1" -features = [ - "serde-codec", - "std", -] -default-features = false - -[dependencies.multihash] -version = "0.18.1" -features = ["multihash-impl"] -default-features = false - -[features] -default = [] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/Cargo.toml.orig deleted file mode 100644 index 87f76a22655c..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/Cargo.toml.orig +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "fvm_ipld_blockstore" -description = "Sharded IPLD Blockstore." -version = "0.2.1" -license = "MIT OR Apache-2.0" -authors = ["ChainSafe Systems ", "Protocol Labs", "Filecoin Core Devs"] -edition = "2021" -repository = "https://github.com/filecoin-project/ref-fvm" - -[dependencies] -cid = { workspace = true, features = ["serde-codec", "std"] } -anyhow = { workspace = true } -# multihash is also re-exported by `cid`. Having `multihash` here as a -# depdendency is needed to enable the features of the re-export. -multihash = { workspace = true, features = ["multihash-impl"] } - -[features] -default = [] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/lib.rs deleted file mode 100644 index d131bb5cc7fd..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/lib.rs +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -use std::rc::Rc; -use std::sync::Arc; - -use anyhow::Result; -use cid::{multihash, Cid}; - -pub mod tracking; - -mod memory; -pub use memory::MemoryBlockstore; - -mod block; -pub use block::*; - -/// An IPLD blockstore suitable for injection into the FVM. -/// -/// The cgo blockstore adapter implements this trait. -pub trait Blockstore { - /// Gets the block from the blockstore. - fn get(&self, k: &Cid) -> Result>>; - - /// Put a block with a pre-computed cid. - /// - /// If you don't yet know the CID, use put. Some blockstores will re-compute the CID internally - /// even if you provide it. - /// - /// If you _do_ already know the CID, use this method as some blockstores _won't_ recompute it. - fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()>; - - /// Checks if the blockstore has the specified block. - fn has(&self, k: &Cid) -> Result { - Ok(self.get(k)?.is_some()) - } - - /// Puts the block into the blockstore, computing the hash with the specified multicodec. - /// - /// By default, this defers to put. - fn put(&self, mh_code: multihash::Code, block: &Block) -> Result - where - Self: Sized, - D: AsRef<[u8]>, - { - let k = block.cid(mh_code); - self.put_keyed(&k, block.as_ref())?; - Ok(k) - } - - /// Bulk put blocks into the blockstore. 
- /// - /// - /// ```rust - /// use multihash::Code::Blake2b256; - /// use fvm_ipld_blockstore::{Blockstore, MemoryBlockstore, Block}; - /// - /// let bs = MemoryBlockstore::default(); - /// let blocks = vec![Block::new(0x55, vec![0, 1, 2])]; - /// bs.put_many(blocks.iter().map(|b| (Blake2b256, b.into()))).unwrap(); - /// ``` - fn put_many(&self, blocks: I) -> Result<()> - where - Self: Sized, - D: AsRef<[u8]>, - I: IntoIterator)>, - { - self.put_many_keyed(blocks.into_iter().map(|(mc, b)| (b.cid(mc), b)))?; - Ok(()) - } - - /// Bulk-put pre-keyed blocks into the blockstore. - /// - /// By default, this defers to put_keyed. - fn put_many_keyed(&self, blocks: I) -> Result<()> - where - Self: Sized, - D: AsRef<[u8]>, - I: IntoIterator, - { - for (c, b) in blocks { - self.put_keyed(&c, b.as_ref())? - } - Ok(()) - } -} - -pub trait Buffered: Blockstore { - fn flush(&self, root: &Cid) -> Result<()>; -} - -macro_rules! impl_blockstore { - ($($typ:ty),+) => { - $( - impl Blockstore for $typ where - BS: Blockstore, { - fn get(&self, k: &Cid) -> Result>> { - (**self).get(k) - } - - fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()> { - (**self).put_keyed(k, block) - } - - fn has(&self, k: &Cid) -> Result { - (**self).has(k) - } - - fn put(&self, mh_code: multihash::Code, block: &Block) -> Result - where - Self: Sized, - D: AsRef<[u8]>, - { - (**self).put(mh_code, block) - } - - fn put_many(&self, blocks: I) -> Result<()> - where - Self: Sized, - D: AsRef<[u8]>, - I: IntoIterator)>, - { - (**self).put_many(blocks) - } - - fn put_many_keyed(&self, blocks: I) -> Result<()> - where - Self: Sized, - D: AsRef<[u8]>, - I: IntoIterator, - { - (**self).put_many_keyed(blocks) - } - } - )+ - } -} - -impl_blockstore!(Arc, Rc, &BS); diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..267d3693fd4e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "9172edcb01ede1d7d50744f1182ffca7275f710c" + }, + "path_in_vcs": "ipld/blockstore" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/CHANGELOG.md new file mode 100644 index 000000000000..3053419ce2bb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/CHANGELOG.md @@ -0,0 +1,38 @@ +# Changelog + +Changes to the FVM's Blockstore abstraction + +## [Unreleased] + +## 0.3.1 [2024-11-08] + +Remove unnecessary features from `multihash-codetable`. + +## 0.3.0 [2024-10-31] + +Update cid to v0.11 and multihash to v0.19. + +You will have to update your multihash and cid crates to be compatible, see the [multihash release notes](https://github.com/multiformats/rust-multihash/blob/master/CHANGELOG.md#-2023-06-06) for details on the breaking changes. + +## 0.2.1 [2024-04-30] + +- Constify `Block::new`. 
+- Implement `PartialEq` between blocks with different underlying buffer types. +- Improve `Block` `Debug` implementation. + +## 0.2.0 [2023-06-28] + +Breaking Changes: + +- Update cid/multihash. This is a breaking change as it affects the API. + +## 0.1.2 [2023-05-03] + +- Impl blockstore for `Arc`. +- Add a `copy_to` method to the memory blockstore. + +## 0.1.1 [2022-05-16] + +Remove blake2b feature from multihash (we don't need it here). This is technically a breaking change +as downstream could be relying on this (by accident), but they shouldn't be, so we're not going to +bother with a minor version bump. diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/Cargo.toml new file mode 100644 index 000000000000..230cb5ddfc64 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/Cargo.toml @@ -0,0 +1,41 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +name = "fvm_ipld_blockstore" +version = "0.3.1" +authors = [ + "ChainSafe Systems ", + "Protocol Labs", + "Filecoin Core Devs", +] +description = "Sharded IPLD Blockstore." +license = "MIT OR Apache-2.0" +repository = "https://github.com/filecoin-project/ref-fvm" + +[dependencies.anyhow] +version = "1.0.71" + +[dependencies.cid] +version = "0.11.1" +features = [ + "serde-codec", + "std", +] +default-features = false + +[dependencies.multihash-codetable] +version = "0.1.4" +default-features = false + +[features] +default = [] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/Cargo.toml.orig new file mode 100644 index 000000000000..a8a263826c58 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/Cargo.toml.orig @@ -0,0 +1,16 @@ +[package] +name = "fvm_ipld_blockstore" +description = "Sharded IPLD Blockstore." 
+version = "0.3.1" +license = "MIT OR Apache-2.0" +authors = ["ChainSafe Systems ", "Protocol Labs", "Filecoin Core Devs"] +edition = "2021" +repository = "https://github.com/filecoin-project/ref-fvm" + +[dependencies] +cid = { workspace = true, features = ["serde-codec", "std"] } +anyhow = { workspace = true } +multihash-codetable = { workspace = true } + +[features] +default = [] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/block.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/block.rs similarity index 94% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/block.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/block.rs index 9648bfb7e6b9..1603680a6d30 100644 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/block.rs +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/block.rs @@ -2,8 +2,8 @@ use std::fmt; // Copyright 2021-2023 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use cid::multihash::{self, MultihashDigest}; use cid::Cid; +use multihash_codetable::MultihashDigest; /// Block represents a typed (i.e., with codec) IPLD block. #[derive(Copy, Clone)] @@ -27,7 +27,7 @@ where Self { codec, data } } - pub fn cid(&self, mh_code: multihash::Code) -> Cid { + pub fn cid(&self, mh_code: multihash_codetable::Code) -> Cid { Cid::new_v1(self.codec, mh_code.digest(self.data.as_ref())) } diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/lib.rs new file mode 100644 index 000000000000..fe5134a0e07f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/lib.rs @@ -0,0 +1,138 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::rc::Rc; +use std::sync::Arc; + +use anyhow::Result; +use cid::Cid; + +pub mod tracking; + +mod memory; +pub use memory::MemoryBlockstore; + +mod block; +pub use block::*; + +/// An IPLD blockstore suitable for injection into the FVM. +/// +/// The cgo blockstore adapter implements this trait. +pub trait Blockstore { + /// Gets the block from the blockstore. + fn get(&self, k: &Cid) -> Result>>; + + /// Put a block with a pre-computed cid. + /// + /// If you don't yet know the CID, use put. Some blockstores will re-compute the CID internally + /// even if you provide it. + /// + /// If you _do_ already know the CID, use this method as some blockstores _won't_ recompute it. + fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()>; + + /// Checks if the blockstore has the specified block. + fn has(&self, k: &Cid) -> Result { + Ok(self.get(k)?.is_some()) + } + + /// Puts the block into the blockstore, computing the hash with the specified multicodec. + /// + /// By default, this defers to put. + fn put(&self, mh_code: multihash_codetable::Code, block: &Block) -> Result + where + Self: Sized, + D: AsRef<[u8]>, + { + let k = block.cid(mh_code); + self.put_keyed(&k, block.as_ref())?; + Ok(k) + } + + /// Bulk put blocks into the blockstore. 
+ /// + /// + /// ```rust + /// use multihash_codetable::Code::Blake2b256; + /// use fvm_ipld_blockstore::{Blockstore, MemoryBlockstore, Block}; + /// + /// let bs = MemoryBlockstore::default(); + /// let blocks = vec![Block::new(0x55, vec![0, 1, 2])]; + /// bs.put_many(blocks.iter().map(|b| (Blake2b256, b.into()))).unwrap(); + /// ``` + fn put_many(&self, blocks: I) -> Result<()> + where + Self: Sized, + D: AsRef<[u8]>, + I: IntoIterator)>, + { + self.put_many_keyed(blocks.into_iter().map(|(mc, b)| (b.cid(mc), b)))?; + Ok(()) + } + + /// Bulk-put pre-keyed blocks into the blockstore. + /// + /// By default, this defers to put_keyed. + fn put_many_keyed(&self, blocks: I) -> Result<()> + where + Self: Sized, + D: AsRef<[u8]>, + I: IntoIterator, + { + for (c, b) in blocks { + self.put_keyed(&c, b.as_ref())? + } + Ok(()) + } +} + +pub trait Buffered: Blockstore { + fn flush(&self, root: &Cid) -> Result<()>; +} + +macro_rules! impl_blockstore { + ($($typ:ty),+) => { + $( + impl Blockstore for $typ where + BS: Blockstore, { + fn get(&self, k: &Cid) -> Result>> { + (**self).get(k) + } + + fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()> { + (**self).put_keyed(k, block) + } + + fn has(&self, k: &Cid) -> Result { + (**self).has(k) + } + + fn put(&self, mh_code: multihash_codetable::Code, block: &Block) -> Result + where + Self: Sized, + D: AsRef<[u8]>, + { + (**self).put(mh_code, block) + } + + fn put_many(&self, blocks: I) -> Result<()> + where + Self: Sized, + D: AsRef<[u8]>, + I: IntoIterator)>, + { + (**self).put_many(blocks) + } + + fn put_many_keyed(&self, blocks: I) -> Result<()> + where + Self: Sized, + D: AsRef<[u8]>, + I: IntoIterator, + { + (**self).put_many_keyed(blocks) + } + } + )+ + } +} + +impl_blockstore!(Arc, Rc, &BS); diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/memory.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/memory.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/memory.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/memory.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/tracking.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/tracking.rs similarity index 97% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/tracking.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/tracking.rs index 8ddc1cff56d1..85d8c24a079f 100644 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.2.1/src/tracking.rs +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_blockstore-0.3.1/src/tracking.rs @@ -5,8 +5,8 @@ use std::cell::RefCell; use anyhow::Result; -use cid::multihash::{self, Code}; use cid::Cid; +use multihash_codetable::Code; use super::{Block, Blockstore}; @@ -83,7 +83,7 @@ where where Self: Sized, D: AsRef<[u8]>, - I: IntoIterator)>, + I: IntoIterator)>, { let mut stats = self.stats.borrow_mut(); self.base.put_many(blocks.into_iter().inspect(|(_, b)| { diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/.cargo_vcs_info.json deleted file mode 100644 index c3c6f83a2728..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": 
"3ad5b90f35085e26585e47674844bb72ef09fb06" - }, - "path_in_vcs": "ipld/encoding" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/CHANGELOG.md deleted file mode 100644 index a9f917539cc7..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/CHANGELOG.md +++ /dev/null @@ -1,47 +0,0 @@ -# Changelog - -Changes to the FVM's shared encoding utilities. - -## [Unreleased] - -## 0.4.0 [2023-06-28) - -Breaking Changes: - -- Update cid/multihash. This is a breaking change as it affects the API. - -## 0.3.3 [2023-01-19] - -- Add the `CBOR` codec, and support it in `IpldBlock` -- Add Debug formatting for `IpldBlock` -- Mark `Cbor` trait as deprecated - -## 0.3.2 [2022-12-17] - -- IpldBlock::serialize_cbor returns Option instead of IpldBlock - -## 0.3.1 [2022-12-17] - -- Add new `IpldBlock` type that supports both `DAG_CBOR` and `IPLD_RAW` codecs - -## 0.3.0 [2022-10-11] - -- Publicly use `serde` to expose it when developing actors. -- Expose a new `strict_bytes` module based on `serde_bytes`. This new module: - - Refuses to decode anything that's not "bytes" (like `cs_serde_bytes`). - - Can also decode into a fixed-sized array. - - Has ~1% of the code of upstream. - -## 0.2.2 [2022-06-13] - -Change the hash length assert into an actual check, just in case. - -## 0.2.1 [2022-05-19] - -Update `serde_ipld_cbor` to 0.2.2. - -## 0.2.0 [2022-04-29] - -Update `serde_ipld_cbor` to 0.2.0, switching to cbor4ii. - -The only breaking change is that `from_reader` now requires `io::BufRead`, not just `io::Read`. diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/Cargo.toml deleted file mode 100644 index be40266be622..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/Cargo.toml +++ /dev/null @@ -1,67 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -name = "fvm_ipld_encoding" -version = "0.4.0" -authors = [ - "ChainSafe Systems ", - "Protocol Labs", - "Filecoin Core Devs", -] -description = "Sharded IPLD encoding." 
-license = "MIT OR Apache-2.0" -repository = "https://github.com/filecoin-project/ref-fvm" - -[dependencies.anyhow] -version = "1.0.71" - -[dependencies.cid] -version = "0.10.1" -features = [ - "serde-codec", - "std", -] -default-features = false - -[dependencies.fvm_ipld_blockstore] -version = "0.2" - -[dependencies.multihash] -version = "0.18.1" -features = [ - "blake2b", - "multihash-impl", -] -default-features = false - -[dependencies.serde] -version = "1.0" -features = ["derive"] - -[dependencies.serde_ipld_dagcbor] -version = "0.4.0" - -[dependencies.serde_repr] -version = "0.1" - -[dependencies.serde_tuple] -version = "0.5" - -[dependencies.thiserror] -version = "1.0" - -[dev-dependencies.serde_json] -version = "1.0.99" - -[features] -default = [] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/Cargo.toml.orig deleted file mode 100644 index 51b34c707423..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/Cargo.toml.orig +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "fvm_ipld_encoding" -description = "Sharded IPLD encoding." -version = "0.4.0" -license = "MIT OR Apache-2.0" -authors = ["ChainSafe Systems ", "Protocol Labs", "Filecoin Core Devs"] -edition = "2021" -repository = "https://github.com/filecoin-project/ref-fvm" - -[dependencies] -serde = { version = "1.0", features = ["derive"] } -serde_ipld_dagcbor = "0.4.0" -serde_tuple = "0.5" -serde_repr = "0.1" -cid = { workspace = true, features = ["serde-codec", "std"] } -thiserror = "1.0" -anyhow = "1.0.71" -fvm_ipld_blockstore = { version = "0.2", path = "../blockstore" } -# multihash is also re-exported by `cid`. Having `multihash` here as a -# depdendency is needed to enable the features of the re-export. -multihash = { workspace = true, features = ["blake2b", "multihash-impl"] } - -[features] -default = [] - -[dev-dependencies] -serde_json = "1.0.99" diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/lib.rs deleted file mode 100644 index 805b1e5c550b..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/lib.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -mod bytes; -mod cbor; -mod cbor_store; -mod errors; -pub mod ipld_block; -mod raw; -mod vec; -use std::io; - -pub use serde::{self, de, ser}; - -pub use self::bytes::*; -pub use self::cbor::*; -pub use self::cbor_store::CborStore; -pub use self::errors::*; -pub use self::vec::*; - -/// CBOR should be used to pass CBOR data when internal links don't need to be -/// traversable/reachable. When a CBOR block is loaded, said links will not be added to the -/// reachable set. -pub const CBOR: u64 = 0x51; -/// DagCBOR should be used for all IPLD-CBOR data where CIDs need to be traversable. -pub const DAG_CBOR: u64 = 0x71; -/// RAW should be used for raw data. -pub const IPLD_RAW: u64 = 0x55; - -// TODO: these really don't work all that well in a shared context like this as anyone importing -// them also need to _explicitly_ import the serde_tuple & serde_repr crates. These are _macros_, -// not normal items. 
- -pub mod tuple { - pub use serde_tuple::{self, Deserialize_tuple, Serialize_tuple}; -} - -pub mod repr { - pub use serde_repr::{Deserialize_repr, Serialize_repr}; -} - -/// Serializes a value to a vector. -pub fn to_vec(value: &T) -> Result, Error> -where - T: ser::Serialize + ?Sized, -{ - serde_ipld_dagcbor::to_vec(value).map_err(Into::into) -} - -/// Decode a value from CBOR from the given reader. -pub fn from_reader(reader: R) -> Result -where - T: de::DeserializeOwned, - R: io::BufRead, -{ - serde_ipld_dagcbor::from_reader(reader).map_err(Into::into) -} - -/// Decode a value from CBOR from the given slice. -pub fn from_slice<'a, T>(slice: &'a [u8]) -> Result -where - T: de::Deserialize<'a>, -{ - serde_ipld_dagcbor::from_slice(slice).map_err(Into::into) -} - -/// Encode a value as CBOR to the given writer. -pub fn to_writer(mut writer: W, value: &T) -> Result<(), Error> -where - W: io::Write, - T: ser::Serialize, -{ - serde_ipld_dagcbor::to_writer(&mut writer, value).map_err(Into::into) -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/raw.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/raw.rs deleted file mode 100644 index 10c4bb4936b6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/raw.rs +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -use thiserror::Error; - -/// Serialize the given value to a vec. This method rejects all types except "raw bytes". -pub fn to_vec(value: &T) -> Result, super::Error> { - serde::Serialize::serialize(value, Serializer).map_err(|e| super::Error { - description: e.to_string(), - protocol: crate::CodecProtocol::Raw, - }) -} - -#[derive(Error, Debug)] -enum Error { - #[error("IPLD kind not supported by the raw codec")] - KindNotSupported, - #[error("i/o error when serializing: {0}")] - Other(String), -} - -impl serde::ser::Error for Error { - fn custom(msg: T) -> Self - where - T: std::fmt::Display, - { - Error::Other(msg.to_string()) - } -} - -struct Serializer; - -macro_rules! reject { - ($($method:ident $t:ty),*) => { - $( - fn $method(self, _: $t) -> Result { - Err(Error::KindNotSupported) - } - )* - }; -} - -impl serde::ser::Serializer for Serializer { - type Ok = Vec; - type Error = Error; - type SerializeSeq = serde::ser::Impossible, Error>; - type SerializeTuple = serde::ser::Impossible, Error>; - type SerializeTupleStruct = serde::ser::Impossible, Error>; - type SerializeTupleVariant = serde::ser::Impossible, Error>; - type SerializeMap = serde::ser::Impossible, Error>; - type SerializeStruct = serde::ser::Impossible, Error>; - type SerializeStructVariant = serde::ser::Impossible, Error>; - - fn serialize_bytes(self, v: &[u8]) -> Result { - Ok(v.to_owned()) - } - - reject! 
{ - serialize_bool bool, - serialize_i8 i8, - serialize_i16 i16, - serialize_i32 i32, - serialize_i64 i64, - serialize_u8 u8, - serialize_u16 u16, - serialize_u32 u32, - serialize_u64 u64, - serialize_f32 f32, - serialize_f64 f64, - serialize_char char, - serialize_str &str, - serialize_unit_struct &'static str - } - - fn serialize_none(self) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_some(self, _: &T) -> Result - where - T: serde::Serialize, - { - Err(Error::KindNotSupported) - } - - fn serialize_unit(self) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_unit_variant( - self, - _: &'static str, - _: u32, - _: &'static str, - ) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_newtype_struct( - self, - _: &'static str, - _: &T, - ) -> Result - where - T: serde::Serialize, - { - Err(Error::KindNotSupported) - } - - fn serialize_newtype_variant( - self, - _: &'static str, - _: u32, - _: &'static str, - _: &T, - ) -> Result - where - T: serde::Serialize, - { - Err(Error::KindNotSupported) - } - - fn serialize_seq(self, _: Option) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_tuple(self, _: usize) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_tuple_struct( - self, - _: &'static str, - _: usize, - ) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_tuple_variant( - self, - _: &'static str, - _: u32, - _: &'static str, - _: usize, - ) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_map(self, _: Option) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_struct( - self, - _: &'static str, - _: usize, - ) -> Result { - Err(Error::KindNotSupported) - } - - fn serialize_struct_variant( - self, - _: &'static str, - _: u32, - _: &'static str, - _: usize, - ) -> Result { - Err(Error::KindNotSupported) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..b0a8361f1c8b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "9172edcb01ede1d7d50744f1182ffca7275f710c" + }, + "path_in_vcs": "ipld/encoding" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/CHANGELOG.md new file mode 100644 index 000000000000..359981e63d75 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/CHANGELOG.md @@ -0,0 +1,57 @@ +# Changelog + +Changes to the FVM's shared encoding utilities. + +## [Unreleased] + +## 0.5.1 [2024-11-08] + +Remove unnecessary features from `multihash-codetable`. + +## 0.5.0 [2024-10-31] + +Update `cid` to v0.11 and `multihash` to v0.19. 
+ +You will have to update your multihash and cid crates to be compatible, see the [multihash release notes](https://github.com/multiformats/rust-multihash/blob/master/CHANGELOG.md#-2023-06-06) for details on the breaking changes. + +## 0.4.0 [2023-06-28) + +Breaking Changes: + +- Update cid/multihash. This is a breaking change as it affects the API. + +## 0.3.3 [2023-01-19] + +- Add the `CBOR` codec, and support it in `IpldBlock` +- Add Debug formatting for `IpldBlock` +- Mark `Cbor` trait as deprecated + +## 0.3.2 [2022-12-17] + +- IpldBlock::serialize_cbor returns Option instead of IpldBlock + +## 0.3.1 [2022-12-17] + +- Add new `IpldBlock` type that supports both `DAG_CBOR` and `IPLD_RAW` codecs + +## 0.3.0 [2022-10-11] + +- Publicly use `serde` to expose it when developing actors. +- Expose a new `strict_bytes` module based on `serde_bytes`. This new module: + - Refuses to decode anything that's not "bytes" (like `cs_serde_bytes`). + - Can also decode into a fixed-sized array. + - Has ~1% of the code of upstream. + +## 0.2.2 [2022-06-13] + +Change the hash length assert into an actual check, just in case. + +## 0.2.1 [2022-05-19] + +Update `serde_ipld_cbor` to 0.2.2. + +## 0.2.0 [2022-04-29] + +Update `serde_ipld_cbor` to 0.2.0, switching to cbor4ii. + +The only breaking change is that `from_reader` now requires `io::BufRead`, not just `io::Read`. diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/Cargo.toml new file mode 100644 index 000000000000..dd794b725eb4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/Cargo.toml @@ -0,0 +1,65 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +name = "fvm_ipld_encoding" +version = "0.5.1" +authors = [ + "ChainSafe Systems ", + "Protocol Labs", + "Filecoin Core Devs", +] +description = "Sharded IPLD encoding." 
+license = "MIT OR Apache-2.0" +repository = "https://github.com/filecoin-project/ref-fvm" + +[dependencies.anyhow] +version = "1.0.71" + +[dependencies.cid] +version = "0.11.1" +features = [ + "serde", + "std", +] +default-features = false + +[dependencies.fvm_ipld_blockstore] +version = "0.3.1" + +[dependencies.multihash-codetable] +version = "0.1.4" +features = ["blake2b"] +default-features = false + +[dependencies.serde] +version = "1.0.164" +features = ["derive"] +default-features = false + +[dependencies.serde_ipld_dagcbor] +version = "0.6.1" + +[dependencies.serde_repr] +version = "0.1" + +[dependencies.serde_tuple] +version = "0.5.0" + +[dependencies.thiserror] +version = "1.0.40" + +[dev-dependencies.serde_json] +version = "1.0.99" + +[features] +default = [] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/Cargo.toml.orig new file mode 100644 index 000000000000..674f23988192 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/Cargo.toml.orig @@ -0,0 +1,25 @@ +[package] +name = "fvm_ipld_encoding" +description = "Sharded IPLD encoding." +version = "0.5.1" +license = "MIT OR Apache-2.0" +authors = ["ChainSafe Systems ", "Protocol Labs", "Filecoin Core Devs"] +edition = "2021" +repository = "https://github.com/filecoin-project/ref-fvm" + +[dependencies] +serde = { workspace = true } +serde_tuple = { workspace = true } +cid = { workspace = true, features = ["serde", "std"] } +thiserror = { workspace = true } +anyhow = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +multihash-codetable = { workspace = true, features = ["blake2b"] } +serde_ipld_dagcbor = "0.6.1" +serde_repr = "0.1" + +[features] +default = [] + +[dev-dependencies] +serde_json = { workspace = true } diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/bytes.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/bytes.rs similarity index 99% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/bytes.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/bytes.rs index df019855656a..3ceee3c6014e 100644 --- a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/bytes.rs +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/bytes.rs @@ -28,9 +28,9 @@ pub mod strict_bytes { S: Serializer; } - impl Serialize for T + impl Serialize for T where - T: AsRef<[u8]>, + T: AsRef<[u8]> + ?Sized, { fn serialize(&self, serializer: S) -> Result where diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/cbor.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/cbor.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/cbor.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/cbor.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/cbor_store.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/cbor_store.rs similarity index 77% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/cbor_store.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/cbor_store.rs index 5a740ffed847..34b5317b8cd7 100644 --- 
a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/cbor_store.rs +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/cbor_store.rs @@ -1,10 +1,11 @@ // Copyright 2021-2023 Protocol Labs // SPDX-License-Identifier: Apache-2.0, MIT -use cid::{multihash, Cid}; +use anyhow::anyhow; +use cid::Cid; use fvm_ipld_blockstore::{Block, Blockstore}; use serde::{de, ser}; -use crate::DAG_CBOR; +use crate::{CBOR, DAG_CBOR}; /// Wrapper for database to handle inserting and retrieving ipld data with Cids pub trait CborStore: Blockstore + Sized { @@ -13,6 +14,9 @@ pub trait CborStore: Blockstore + Sized { where T: de::DeserializeOwned, { + if !matches!(cid.codec(), CBOR | DAG_CBOR) { + return Err(anyhow!("{} is not CBOR or DagCBOR", cid.codec())); + } match self.get(cid)? { Some(bz) => { let res = crate::from_slice(&bz)?; @@ -23,7 +27,7 @@ pub trait CborStore: Blockstore + Sized { } /// Put an object in the block store and return the Cid identifier. - fn put_cbor(&self, obj: &S, code: multihash::Code) -> anyhow::Result + fn put_cbor(&self, obj: &S, code: multihash_codetable::Code) -> anyhow::Result where S: ser::Serialize, { diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/errors.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/errors.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/errors.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/errors.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/ipld_block.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/ipld_block.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/ipld_block.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/ipld_block.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/lib.rs new file mode 100644 index 000000000000..4bc570171661 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/lib.rs @@ -0,0 +1,77 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +mod bytes; +mod cbor; +mod cbor_store; +mod errors; +pub mod ipld_block; +mod raw; +mod vec; +use std::io; + +pub use serde::{self, de, ser}; + +pub use self::bytes::*; +pub use self::cbor::*; +pub use self::cbor_store::CborStore; +pub use self::errors::*; +pub use self::vec::*; + +/// CBOR should be used to pass CBOR data when internal links don't need to be +/// traversable/reachable. When a CBOR block is loaded, said links will not be added to the +/// reachable set. +pub const CBOR: u64 = 0x51; +/// DagCBOR should be used for all IPLD-CBOR data where CIDs need to be traversable. +pub const DAG_CBOR: u64 = 0x71; +/// RAW should be used for raw data. +pub const IPLD_RAW: u64 = 0x55; + +pub type Multihash = cid::multihash::Multihash<64>; + +// TODO: these really don't work all that well in a shared context like this as anyone importing +// them also need to _explicitly_ import the serde_tuple & serde_repr crates. These are _macros_, +// not normal items. 
+ +pub mod tuple { + pub use serde_tuple::{self, Deserialize_tuple, Serialize_tuple}; +} + +pub mod repr { + pub use serde_repr::{Deserialize_repr, Serialize_repr}; +} + +/// Serializes a value to a vector. +pub fn to_vec(value: &T) -> Result, Error> +where + T: ser::Serialize + ?Sized, +{ + serde_ipld_dagcbor::to_vec(value).map_err(Into::into) +} + +/// Decode a value from CBOR from the given reader. +pub fn from_reader(reader: R) -> Result +where + T: de::DeserializeOwned, + R: io::BufRead, +{ + serde_ipld_dagcbor::from_reader(reader).map_err(Into::into) +} + +/// Decode a value from CBOR from the given slice. +pub fn from_slice<'a, T>(slice: &'a [u8]) -> Result +where + T: de::Deserialize<'a>, +{ + serde_ipld_dagcbor::from_slice(slice).map_err(Into::into) +} + +/// Encode a value as CBOR to the given writer. +pub fn to_writer(mut writer: W, value: &T) -> Result<(), Error> +where + W: io::Write, + T: ser::Serialize, +{ + serde_ipld_dagcbor::to_writer(&mut writer, value).map_err(Into::into) +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/raw.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/raw.rs new file mode 100644 index 000000000000..ed551ada6e1a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/raw.rs @@ -0,0 +1,165 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use thiserror::Error; + +/// Serialize the given value to a vec. This method rejects all types except "raw bytes". +pub fn to_vec(value: &T) -> Result, super::Error> { + serde::Serialize::serialize(value, Serializer).map_err(|e| super::Error { + description: e.to_string(), + protocol: crate::CodecProtocol::Raw, + }) +} + +#[derive(Error, Debug)] +enum Error { + #[error("IPLD kind not supported by the raw codec")] + KindNotSupported, + #[error("i/o error when serializing: {0}")] + Other(String), +} + +impl serde::ser::Error for Error { + fn custom(msg: T) -> Self + where + T: std::fmt::Display, + { + Error::Other(msg.to_string()) + } +} + +struct Serializer; + +macro_rules! reject { + ($($method:ident $t:ty),*) => { + $( + fn $method(self, _: $t) -> Result { + Err(Error::KindNotSupported) + } + )* + }; +} + +impl serde::ser::Serializer for Serializer { + type Ok = Vec; + type Error = Error; + type SerializeSeq = serde::ser::Impossible, Error>; + type SerializeTuple = serde::ser::Impossible, Error>; + type SerializeTupleStruct = serde::ser::Impossible, Error>; + type SerializeTupleVariant = serde::ser::Impossible, Error>; + type SerializeMap = serde::ser::Impossible, Error>; + type SerializeStruct = serde::ser::Impossible, Error>; + type SerializeStructVariant = serde::ser::Impossible, Error>; + + fn serialize_bytes(self, v: &[u8]) -> Result { + Ok(v.to_owned()) + } + + reject! 
{ + serialize_bool bool, + serialize_i8 i8, + serialize_i16 i16, + serialize_i32 i32, + serialize_i64 i64, + serialize_u8 u8, + serialize_u16 u16, + serialize_u32 u32, + serialize_u64 u64, + serialize_f32 f32, + serialize_f64 f64, + serialize_char char, + serialize_str &str, + serialize_unit_struct &'static str + } + + fn serialize_none(self) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_some(self, _: &T) -> Result + where + T: serde::Serialize + ?Sized, + { + Err(Error::KindNotSupported) + } + + fn serialize_unit(self) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_unit_variant( + self, + _: &'static str, + _: u32, + _: &'static str, + ) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_newtype_struct(self, _: &'static str, _: &T) -> Result + where + T: serde::Serialize + ?Sized, + { + Err(Error::KindNotSupported) + } + + fn serialize_newtype_variant( + self, + _: &'static str, + _: u32, + _: &'static str, + _: &T, + ) -> Result + where + T: serde::Serialize + ?Sized, + { + Err(Error::KindNotSupported) + } + + fn serialize_seq(self, _: Option) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_tuple(self, _: usize) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_tuple_struct( + self, + _: &'static str, + _: usize, + ) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_tuple_variant( + self, + _: &'static str, + _: u32, + _: &'static str, + _: usize, + ) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_map(self, _: Option) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_struct( + self, + _: &'static str, + _: usize, + ) -> Result { + Err(Error::KindNotSupported) + } + + fn serialize_struct_variant( + self, + _: &'static str, + _: u32, + _: &'static str, + _: usize, + ) -> Result { + Err(Error::KindNotSupported) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/vec.rs b/third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/vec.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.4.0/src/vec.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_ipld_encoding-0.5.1/src/vec.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/.cargo_vcs_info.json deleted file mode 100644 index 131c83921405..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "3c5da7eafaa100132598b37a67d8c77c8605fabc" - }, - "path_in_vcs": "shared" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/CHANGELOG.md deleted file mode 100644 index becbaf50678d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/CHANGELOG.md +++ /dev/null @@ -1,212 +0,0 @@ -# Changelog - -## [Unreleased] - -## 3.10.0 [2024-06-12] - -- Update `filecoin-proofs-api` to v18 -- fix: remove the pairing feature from fvm_shared [#2009](https://github.com/filecoin-project/ref-fvm/pull/2009) - -## 3.6.0 (2023-09-06) - -- BREAKING: Upgrade the proofs API to v16. -- BREAKING (linking): upgrade blstrs to v0.7 and -- BREAKING: update the minimum rust version to 1.70.0 -- Update & trim some dependencies. -- Add support for the new proofs in v16. 
- -## 3.5.0 [2023-08-18] - -- Add the V21 network version constant - -## 3.4.0 [2023-06-27] - -Breaking Changes: - -- Update cid/multihash. This is a breaking change as it affects the API. - -## 3.3.1 [2023-05-04] - -Fix some address constants (lazy statics, to be precise) when the current network is set to "testnet". Previously, if said constants were evaluated _after_ switching to testnet mode (calling `address::set_current_network`), they'd fail to parse and crash the program when dereferenced. - -## 3.3.0 [2023-04-23] - -- Fixes an issue with proof bindings. - -## 3.2.0 [2023-04-04] - -- Remove unused dependencies. -- Remove unused dependencies. -- BREAKING: Drop unused `registered_seal_proof` method. This appears to have been unused by anyone. - -## 3.1.0 [2023-03-09] - -Update proofs. Unfortunately, this is a breaking change in a minor release but we need to do the same on the v2 release as well. The correct solution is to introduce two crates, fvm1 and fvm2, but that's a future project. - -## 3.0.0 [2022-02-24] - -- Final release for NV18. - -## 3.0.0-alpha.20 [2022-02-06] - -- Change the `BLOCK_GAS_LIMIT` constant to a `u64` to match all the other gas values. - -## 3.0.0-alpha.19 [2022-02-06] - -- Change the event datastructure to take a codec and not double-encode the value. -- Make the message version and gas limits `u64`s instead of `i64`s. - -## 3.0.0-alpha.18 [2022-02-01] - -- Improve rustdocs around events and gas premium. - -## 3.0.0-alpha.17 [2022-01-17] - -- Add `hyperspace` feature to loosen up network version restrictions. - -## 3.0.0-alpha.16 [2023-01-12] - -- Remove uses of the Cbor trait -- Refactor: Move Response from SDK to shared - -## 3.0.0-alpha.15 [2022-12-14] - -- Refactor: ChainID was moved from FVM to shared -- Implement Ethereum Account abstraction - - Removes the f4-as-accont feature, and support for Delegated signature validations - -## 3.0.0-alpha.14 [2022-12-07] - -- Remove GasLimit from the message context. -- Add the message nonce to the message context -- Add the chain ID to the network context. - -## 3.0.0-alpha.13 [2022-11-29] - -- Remove deprecated SYS_INVALID_METHOD exit code -- Add a read-only mode to Sends - - Adds ContextFlags to MessageContext, and a special ReadOnly error - -## 3.0.0-alpha.12 [2022-11-17] - -- Refactor network/message contexts to reduce the number of syscalls. - -## 3.0.0-alpha.11 [2022-11-15] - -- Add support for actor events (FIP-0049). - -## 3.0.0-alpha.10 [2022-11-14] - -- Split `InvokeContext` into two (#1070) -- fix: correctly format negative token amounts (#1065) - -## 3.0.0-alpha.9 [2022-11-08] - -- Add support for state-tree v5. - -## 3.0.0-alpha.8 [2022-10-22] - -- fix compile issues with f4-as-account feature. - -## 3.0.0-alpha.7 [2022-10-21] - -- Temporary workaround: allow validating signatures from embryo f4 addresses - -## 3.0.0-alpha.6 [2022-10-20] - -- Make the f4 address conform to FIP0048 (use `f` as the separator). -- Implement `TryFrom` for `DelegatedAddress` (and make `DelegatedAddress` public). - -## 3.0.0-alpha.5 [2022-10-10] - -- Bumps `fvm_ipld_encoding` and switches from `cs_serde_bytes` to `fvm_ipld_encoding::strict_bytes`. - -## 3.0.0-alpha.4 [2022-10-10] - -- Small f4 address fixes. - -## 3.0.0-alpha.3 [2022-10-10] - -- Switch to rust 2021 edition. -- Add network version 18. -- BREAKING: Allow changing the address "network" at runtime. -- BREAKING: Update the f4 address format and include a checksum. -- BREAKING: Add the gas premium and gas limit to the `vm::context` return type. 
- -## 3.0.0-alpha.2 [2022-09-16] - -- Add basic f4 address support (without checksums for now). -- Change TokenAmount::from_whole to take any `Into` parameter. -- Add nv17 to the network versions. - -The only breaking change is the change to `Address`/`Protocol` (in case anyone is exhaustively matching on them). - -## 3.0.0-alpha.1 [2022-08-31] - -- Bump base version to v3. -- Add `origin` to `vm::Context`. - -## 2.0.0... - -See the `release/v2` branch. - -- Add recover secp public key syscall. -- Removed `actor::builtin::Type` (moved to the actors themselves). -- Add additional hash functions to the hash syscall. -- Add blake2b512 -- Change TokenAmount from a type alias to a struct wrapping BigInt - -## 0.8.0 [2022-06-13] - -- Add a new proofs version type. - -## 0.7.1 [2022-05-26] - -Add a shared `MAX_CID_LEN` constant. - -## 0.7.0 [2022-05-16] - -- Updates the blockstore. -- Removes unnecessary chrono dep. -- Removes the `DomainSeparationTag` type. This is moving into the actors themselves as the FVM - doesn't care about it. - - Downstream crates should just replicate this type internally, if necessary. -- Adds a new `crypto::signature::verify` function to allow verifying signatures without creating a - new `Signature` object. This allows verifying _borrowed_ signatures without allocating. -- Updates for the syscall refactor (see `fvm_sdk` v0.7.0): - - Adds a `BufferTooSmall` `ErrorNumber`. - - Marks `ErrorNumber` as non-exhaustive for future extension. - - Changes the syscall "out" types for the syscall refactor. - -## 0.6.1 [2022-04-29] - -- Added `testing` feature to have `Default` derive on `Message`. Extended this feature to `Address` and `Payload`. -- Improve `ErrorNumber` documentation. -- Update `fvm_ipld_encoding` for the cbor encoder switch. - -## 0.6.0 [2022-04-14] - -BREAKING: Switch syscall struct alignment: https://github.com/filecoin-project/fvm-specs/issues/63 - -Actors built against this new version of fvm_shared will be incompatible with prior FVM versions, -and vice-versa. - -- Added `Display` trait to `Type` for error printing. -- Added _cfg = "testing"_ on `Default` trait for `Message` structure. - -## 0.5.1 [2022-04-11] - -Add the `USR_ASSERTION_FAILED` exit code. - -## 0.5.0 [2022-04-11] - -- Enforce maximum big-int size to match lotus. -- Make signature properties public. -- Major error type refactor. - -The largest change here is a major error type refactor. - -1. It's now a u32 with a set of pre-defined values instead of an enum. -2. The error codes have been reworked according to the FVM spec. - -Both of these changes were made to better support user-defined actors. diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/Cargo.toml deleted file mode 100644 index db0fdf30b70e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/Cargo.toml +++ /dev/null @@ -1,144 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2021" -name = "fvm_shared" -version = "3.10.0" -authors = [ - "ChainSafe Systems ", - "Protocol Labs", - "Filecoin Core Devs", -] -description = "Filecoin Virtual Machine shared types and functions" -license = "MIT OR Apache-2.0" -repository = "https://github.com/filecoin-project/ref-fvm" - -[dependencies.anyhow] -version = "1.0.71" - -[dependencies.arbitrary] -version = "1.3" -features = ["derive"] -optional = true - -[dependencies.bitflags] -version = "2.3.3" -features = ["serde"] - -[dependencies.blake2b_simd] -version = "1.0.1" - -[dependencies.bls-signatures] -version = "0.15" -optional = true -default-features = false - -[dependencies.cid] -version = "0.10.1" -features = [ - "serde-codec", - "std", -] -default-features = false - -[dependencies.data-encoding] -version = "2.4.0" - -[dependencies.data-encoding-macro] -version = "0.1.13" - -[dependencies.filecoin-proofs-api] -version = "18" -optional = true -default-features = false - -[dependencies.fvm_ipld_encoding] -version = "0.4" - -[dependencies.lazy_static] -version = "1.4.0" - -[dependencies.libsecp256k1] -version = "0.7" -optional = true - -[dependencies.multihash] -version = "0.18.1" -default-features = false - -[dependencies.num-bigint] -version = "0.4" - -[dependencies.num-derive] -version = "0.4" - -[dependencies.num-integer] -version = "0.1" - -[dependencies.num-traits] -version = "0.2" - -[dependencies.quickcheck] -version = "1" -optional = true - -[dependencies.serde] -version = "1" -default-features = false - -[dependencies.serde_tuple] -version = "0.5" - -[dependencies.thiserror] -version = "1.0" - -[dependencies.unsigned-varint] -version = "0.7.1" - -[dev-dependencies.multihash] -version = "0.18.1" -features = [ - "multihash-impl", - "sha2", - "sha3", - "ripemd", -] -default-features = false - -[dev-dependencies.quickcheck_macros] -version = "1" - -[dev-dependencies.rand] -version = "0.8" - -[dev-dependencies.rand_chacha] -version = "0.3" - -[dev-dependencies.serde_json] -version = "1.0.99" - -[features] -arb = [ - "arbitrary", - "dep:quickcheck", - "num-bigint/quickcheck", -] -blst = ["bls-signatures/blst"] -crypto = [ - "libsecp256k1", - "blst", - "proofs", -] -default = [] -proofs = ["filecoin-proofs-api"] -secp256k1 = ["libsecp256k1"] -testing = [] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/Cargo.toml.orig deleted file mode 100644 index 2c8198595ac4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/Cargo.toml.orig +++ /dev/null @@ -1,54 +0,0 @@ -[package] -name = "fvm_shared" -description = "Filecoin Virtual Machine shared types and functions" -version = "3.10.0" -edition = "2021" -license = "MIT OR Apache-2.0" -authors = ["ChainSafe Systems ", "Protocol Labs", "Filecoin Core Devs"] -repository = "https://github.com/filecoin-project/ref-fvm" - -[dependencies] -blake2b_simd = "1.0.1" -thiserror = "1.0" -num-traits = "0.2" -num-derive = "0.4" -num-bigint = "0.4" -num-integer = "0.1" -data-encoding = "2.4.0" -data-encoding-macro = "0.1.13" -lazy_static = "1.4.0" -cid = { workspace = true, features = ["serde-codec", "std"] } -multihash = { workspace = true } -unsigned-varint = "0.7.1" -anyhow = "1.0.71" -fvm_ipld_encoding = { version = "0.4" } -serde = { version = "1", default-features = false } -serde_tuple = "0.5" -arbitrary = { version = "1.3", optional = true, features = ["derive"] } -quickcheck = { version = "1", optional = true } -bitflags = { 
version = "2.3.3", features = ["serde"] } - -## non-wasm dependencies; these dependencies and the respective code is -## only activated through non-default features, which the Kernel enables, but -## not the actors. -filecoin-proofs-api = { version = "18", default-features = false, optional = true } -libsecp256k1 = { version = "0.7", optional = true } -bls-signatures = { version = "0.15", default-features = false, optional = true } - -[dev-dependencies] -rand = "0.8" -rand_chacha = "0.3" -serde_json = "1.0.99" -multihash = { workspace = true, features = ["multihash-impl", "sha2", "sha3", "ripemd"] } -quickcheck_macros = "1" - -fvm_shared = { path = ".", features = ["arb"] } - -[features] -default = [] -crypto = ["libsecp256k1", "blst", "proofs"] -proofs = ["filecoin-proofs-api"] -secp256k1 = ["libsecp256k1"] -blst = ["bls-signatures/blst"] -testing = [] -arb = ["arbitrary", "dep:quickcheck", "num-bigint/quickcheck"] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/mod.rs deleted file mode 100644 index c4367ae8bf5d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/mod.rs +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -mod errors; -mod network; -mod payload; -mod protocol; - -use std::borrow::Cow; -use std::fmt; -use std::hash::Hash; -use std::str::FromStr; - -use data_encoding::Encoding; -use data_encoding_macro::new_encoding; -use fvm_ipld_encoding::strict_bytes; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; - -pub use self::errors::Error; -pub use self::network::{current_network, set_current_network, Network}; -pub use self::payload::{DelegatedAddress, Payload}; -pub use self::protocol::Protocol; -use crate::ActorID; - -/// defines the encoder for base32 encoding with the provided string with no padding -const ADDRESS_ENCODER: Encoding = new_encoding! { - symbols: "abcdefghijklmnopqrstuvwxyz234567", - padding: None, -}; - -/// Hash length of payload for Secp and Actor addresses. -pub const PAYLOAD_HASH_LEN: usize = 20; - -/// Uncompressed secp public key used for validation of Secp addresses. -pub const SECP_PUB_LEN: usize = 65; - -/// BLS public key length used for validation of BLS addresses. -pub const BLS_PUB_LEN: usize = 48; - -/// Max length of f4 sub addresses. -pub const MAX_SUBADDRESS_LEN: usize = 54; - -/// Defines first available ID address after builtin actors -pub const FIRST_NON_SINGLETON_ADDR: ActorID = 100; - -lazy_static::lazy_static! { - static ref BLS_ZERO_ADDR_BYTES: [u8; BLS_PUB_LEN] = { - let bz_addr = Network::Mainnet.parse_address("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a"); - if let Ok(Address {payload: Payload::BLS(pubkey), ..}) = bz_addr { - pubkey - } else { - panic!("failed to parse BLS address from provided BLS_ZERO_ADDR string") - } - }; -} - -/// Length of the checksum hash for string encodings. -pub const CHECKSUM_HASH_LEN: usize = 4; - -/// The max encoded length of an address. 
-pub const MAX_ADDRESS_LEN: usize = 65; - -const MAX_ADDRRESS_TEXT_LEN: usize = 138; -const MAINNET_PREFIX: &str = "f"; -const TESTNET_PREFIX: &str = "t"; - -/// Address is the struct that defines the protocol and data payload conversion from either -/// a public key or value -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] -#[cfg_attr(feature = "testing", derive(Default))] -#[cfg_attr(feature = "arb", derive(arbitrary::Arbitrary))] -pub struct Address { - payload: Payload, -} - -impl Address { - /// Construct a new address with the specified network. - fn new(protocol: Protocol, bz: &[u8]) -> Result { - Ok(Self { - payload: Payload::new(protocol, bz)?, - }) - } - - /// Creates address from encoded bytes. - pub fn from_bytes(bz: &[u8]) -> Result { - if bz.len() < 2 { - Err(Error::InvalidLength) - } else { - let protocol = Protocol::from_byte(bz[0]).ok_or(Error::UnknownProtocol)?; - Self::new(protocol, &bz[1..]) - } - } - - /// Generates new address using ID protocol. - pub const fn new_id(id: u64) -> Self { - Self { - payload: Payload::ID(id), - } - } - - /// Generates new address using Secp256k1 pubkey. - pub fn new_secp256k1(pubkey: &[u8]) -> Result { - if pubkey.len() != SECP_PUB_LEN { - return Err(Error::InvalidSECPLength(pubkey.len())); - } - Ok(Self { - payload: Payload::Secp256k1(address_hash(pubkey)), - }) - } - - /// Generates new address using the Actor protocol. - pub fn new_actor(data: &[u8]) -> Self { - Self { - payload: Payload::Actor(address_hash(data)), - } - } - - /// Generates a new delegated address from a namespace and a subaddress. - pub fn new_delegated(ns: ActorID, subaddress: &[u8]) -> Result { - Ok(Self { - payload: Payload::Delegated(DelegatedAddress::new(ns, subaddress)?), - }) - } - - /// Generates new address using BLS pubkey. - pub fn new_bls(pubkey: &[u8]) -> Result { - if pubkey.len() != BLS_PUB_LEN { - return Err(Error::InvalidBLSLength(pubkey.len())); - } - let mut key = [0u8; BLS_PUB_LEN]; - key.copy_from_slice(pubkey); - Ok(Self { - payload: Payload::BLS(key), - }) - } - - pub fn is_bls_zero_address(&self) -> bool { - match self.payload { - Payload::BLS(payload_bytes) => payload_bytes == *BLS_ZERO_ADDR_BYTES, - _ => false, - } - } - - /// Returns protocol for Address - pub fn protocol(&self) -> Protocol { - Protocol::from(self.payload) - } - - /// Returns the `Payload` object from the address, where the respective protocol data is kept - /// in an enum separated by protocol - pub fn payload(&self) -> &Payload { - &self.payload - } - - /// Converts Address into `Payload` object, where the respective protocol data is kept - /// in an enum separated by protocol - pub fn into_payload(self) -> Payload { - self.payload - } - - /// Returns the raw bytes data payload of the Address - pub fn payload_bytes(&self) -> Vec { - self.payload.to_raw_bytes() - } - - /// Returns encoded bytes of Address - pub fn to_bytes(self) -> Vec { - self.payload.to_bytes() - } - - /// Get ID of the address. ID protocol only. - pub fn id(&self) -> Result { - match self.payload { - Payload::ID(id) => Ok(id), - _ => Err(Error::NonIDAddress), - } - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let protocol = self.protocol(); - - // write `fP` where P is the protocol number. 
- write!(f, "{}{}", current_network().to_prefix(), protocol)?; - - fn write_payload( - f: &mut fmt::Formatter<'_>, - protocol: Protocol, - prefix: Option<&[u8]>, - data: &[u8], - ) -> fmt::Result { - let mut hasher = blake2b_simd::Params::new() - .hash_length(CHECKSUM_HASH_LEN) - .to_state(); - hasher.update(&[protocol as u8]); - if let Some(prefix) = prefix { - hasher.update(prefix); - } - hasher.update(data); - - let mut buf = Vec::with_capacity(data.len() + CHECKSUM_HASH_LEN); - buf.extend(data); - buf.extend(hasher.finalize().as_bytes()); - - f.write_str(&ADDRESS_ENCODER.encode(&buf)) - } - - match self.payload() { - Payload::ID(id) => write!(f, "{}", id), - Payload::Secp256k1(data) | Payload::Actor(data) => { - write_payload(f, protocol, None, data) - } - Payload::BLS(data) => write_payload(f, protocol, None, data), - Payload::Delegated(addr) => { - write!(f, "{}f", addr.namespace())?; - write_payload( - f, - protocol, - Some(unsigned_varint::encode::u64( - addr.namespace(), - &mut unsigned_varint::encode::u64_buffer(), - )), - addr.subaddress(), - ) - } - } - } -} - -#[cfg(feature = "arb")] -impl quickcheck::Arbitrary for Address { - fn arbitrary(g: &mut quickcheck::Gen) -> Self { - Self { - payload: Payload::arbitrary(g), - } - } -} - -pub(self) fn parse_address(addr: &str) -> Result<(Address, Network), Error> { - if addr.len() > MAX_ADDRRESS_TEXT_LEN || addr.len() < 3 { - return Err(Error::InvalidLength); - } - let network = Network::from_prefix(addr.get(0..1).ok_or(Error::UnknownNetwork)?)?; - - // get protocol from second character - let protocol: Protocol = match addr.get(1..2).ok_or(Error::UnknownProtocol)? { - "0" => Protocol::ID, - "1" => Protocol::Secp256k1, - "2" => Protocol::Actor, - "3" => Protocol::BLS, - "4" => Protocol::Delegated, - _ => { - return Err(Error::UnknownProtocol); - } - }; - - fn validate_and_split_checksum<'a>( - protocol: Protocol, - prefix: Option<&[u8]>, - payload: &'a [u8], - ) -> Result<&'a [u8], Error> { - if payload.len() < CHECKSUM_HASH_LEN { - return Err(Error::InvalidLength); - } - let (payload, csum) = payload.split_at(payload.len() - CHECKSUM_HASH_LEN); - let mut hasher = blake2b_simd::Params::new() - .hash_length(CHECKSUM_HASH_LEN) - .to_state(); - hasher.update(&[protocol as u8]); - if let Some(prefix) = prefix { - hasher.update(prefix); - } - hasher.update(payload); - if hasher.finalize().as_bytes() != csum { - return Err(Error::InvalidChecksum); - } - Ok(payload) - } - - // bytes after the protocol character is the data payload of the address - let raw = addr.get(2..).ok_or(Error::InvalidPayload)?; - let addr = match protocol { - Protocol::ID => { - if raw.len() > 20 { - // 20 is max u64 as string - return Err(Error::InvalidLength); - } - let id = raw.parse::()?; - Address { - payload: Payload::ID(id), - } - } - Protocol::Delegated => { - let (id, subaddr) = raw.split_once('f').ok_or(Error::InvalidPayload)?; - if id.len() > 20 { - // 20 is max u64 as string - return Err(Error::InvalidLength); - } - let id = id.parse::()?; - // decode subaddr - let subaddr_csum = ADDRESS_ENCODER.decode(subaddr.as_bytes())?; - // validate and split subaddr. 
- let subaddr = validate_and_split_checksum( - protocol, - Some(unsigned_varint::encode::u64( - id, - &mut unsigned_varint::encode::u64_buffer(), - )), - &subaddr_csum, - )?; - - Address { - payload: Payload::Delegated(DelegatedAddress::new(id, subaddr)?), - } - } - Protocol::Secp256k1 | Protocol::Actor | Protocol::BLS => { - // decode using byte32 encoding - let payload_csum = ADDRESS_ENCODER.decode(raw.as_bytes())?; - // validate and split payload. - let payload = validate_and_split_checksum(protocol, None, &payload_csum)?; - - // sanity check to make sure address hash values are correct length - if match protocol { - Protocol::Secp256k1 | Protocol::Actor => PAYLOAD_HASH_LEN, - Protocol::BLS => BLS_PUB_LEN, - _ => unreachable!(), - } != payload.len() - { - return Err(Error::InvalidPayload); - } - - Address::new(protocol, payload)? - } - }; - Ok((addr, network)) -} - -impl FromStr for Address { - type Err = Error; - fn from_str(addr: &str) -> Result { - current_network().parse_address(addr) - } -} - -impl Serialize for Address { - fn serialize(&self, s: S) -> Result - where - S: Serializer, - { - let address_bytes = self.to_bytes(); - strict_bytes::Serialize::serialize(&address_bytes, s) - } -} - -impl<'de> Deserialize<'de> for Address { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bz: Cow<'de, [u8]> = strict_bytes::Deserialize::deserialize(deserializer)?; - - // Create and return created address of unmarshalled bytes - Address::from_bytes(&bz).map_err(de::Error::custom) - } -} - -pub(crate) fn to_leb_bytes(id: u64) -> Vec { - // write id to buffer in leb128 format - unsigned_varint::encode::u64(id, &mut unsigned_varint::encode::u64_buffer()).into() -} - -pub(crate) fn from_leb_bytes(bz: &[u8]) -> Result { - // write id to buffer in leb128 format - let (id, remaining) = unsigned_varint::decode::u64(bz)?; - if !remaining.is_empty() { - return Err(Error::InvalidPayload); - } - Ok(id) -} - -#[cfg(test)] -mod tests { - // Test cases for FOR-02: https://github.com/ChainSafe/forest/issues/1134 - use crate::address::errors::Error; - use crate::address::{from_leb_bytes, to_leb_bytes}; - - #[test] - fn test_from_leb_bytes_passing() { - let passing = vec![67]; - assert_eq!(to_leb_bytes(from_leb_bytes(&passing).unwrap()), vec![67]); - } - - #[test] - fn test_from_leb_bytes_extra_bytes() { - let extra_bytes = vec![67, 0, 1, 2]; - - match from_leb_bytes(&extra_bytes) { - Ok(id) => { - println!( - "Successfully decoded bytes when it was not supposed to. Result was: {:?}", - &to_leb_bytes(id) - ); - panic!(); - } - Err(e) => { - assert_eq!(e, Error::InvalidPayload); - } - } - } - - #[test] - fn test_from_leb_bytes_minimal_encoding() { - let minimal_encoding = vec![67, 0, 130, 0]; - - match from_leb_bytes(&minimal_encoding) { - Ok(id) => { - println!( - "Successfully decoded bytes when it was not supposed to. 
Result was: {:?}", - &to_leb_bytes(id) - ); - panic!(); - } - Err(e) => { - assert_eq!(e, Error::InvalidPayload); - } - } - } -} - -/// Returns an address hash for given data -fn address_hash(ingest: &[u8]) -> [u8; 20] { - let digest = blake2b_simd::Params::new() - .hash_length(PAYLOAD_HASH_LEN) - .to_state() - .update(ingest) - .finalize(); - - let mut hash = [0u8; 20]; - hash.copy_from_slice(digest.as_bytes()); - hash -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/network.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/network.rs deleted file mode 100644 index dfbd581f3f86..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/network.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -use std::sync::atomic::{AtomicU8, Ordering}; - -use num_derive::{FromPrimitive, ToPrimitive}; -use num_traits::{FromPrimitive, ToPrimitive}; - -use super::{Address, Error, MAINNET_PREFIX, TESTNET_PREFIX}; - -static ATOMIC_NETWORK: AtomicU8 = AtomicU8::new(0); - -/// Network defines the preconfigured networks to use with address encoding -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, FromPrimitive, ToPrimitive)] -#[repr(u8)] -#[cfg_attr(feature = "arb", derive(arbitrary::Arbitrary))] -#[derive(Default)] -pub enum Network { - #[default] - Mainnet = 0, - Testnet = 1, -} - -impl Network { - /// to_prefix is used to convert the network into a string - /// used when converting address to string - pub(super) fn to_prefix(self) -> &'static str { - match self { - Network::Mainnet => MAINNET_PREFIX, - Network::Testnet => TESTNET_PREFIX, - } - } - - /// from_prefix is used to convert the network from a string - /// used when parsing - pub(super) fn from_prefix(s: &str) -> Result { - match s { - MAINNET_PREFIX => Ok(Network::Mainnet), - TESTNET_PREFIX => Ok(Network::Testnet), - _ => Err(Error::UnknownNetwork), - } - } - - /// Parse an address belonging to this network. - pub fn parse_address(self, addr: &str) -> Result { - let (addr, network) = super::parse_address(addr)?; - if network != self { - return Err(Error::UnknownNetwork); - } - Ok(addr) - } -} - -/// Gets the current network. -pub fn current_network() -> Network { - Network::from_u8(ATOMIC_NETWORK.load(Ordering::Relaxed)).unwrap_or_default() -} - -/// Sets the default network. -/// -/// The network is used to differentiate between different filecoin networks _in text_ but isn't -/// actually encoded in the binary representation of addresses. Changing the current network will: -/// -/// 1. Change the prefix used when formatting an address as a string. -/// 2. Change the prefix _accepted_ when parsing an address. -pub fn set_current_network(network: Network) { - ATOMIC_NETWORK.store(network.to_u8().unwrap_or_default(), Ordering::Relaxed) -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use super::*; - use crate::address::Address; - - #[test] - fn set_network() { - assert_eq!(current_network(), Network::default()); - assert_eq!(Network::default(), Network::Mainnet); - - // We're in mainnet mode. - let addr1 = Address::from_str("f01"); - Address::from_str("t01").expect_err("should have failed to parse testnet address"); - assert_eq!( - addr1, - Network::Testnet.parse_address("t01"), - "parsing an explicit address should still work" - ); - - // Switch to testnet mode. 
- set_current_network(Network::Testnet); - - // Now we're in testnet mode. - let addr2 = Address::from_str("t01"); - Address::from_str("f01").expect_err("should have failed to parse testnet address"); - - // Networks are relevent for parsing only. - assert_eq!(addr1, addr2) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/crypto/hash.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/crypto/hash.rs deleted file mode 100644 index f47e72142115..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/crypto/hash.rs +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -#[repr(u64)] -pub enum SupportedHashes { - Sha2_256 = 0x12, - Blake2b256 = 0xb220, - Blake2b512 = 0xb240, - Keccak256 = 0x1b, - Ripemd160 = 0x1053, -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/econ/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/econ/mod.rs deleted file mode 100644 index 458d6cfb52ab..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/econ/mod.rs +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -use std::cmp::Ordering; -use std::fmt; -use std::iter::Sum; -use std::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; - -use num_bigint::BigInt; -use num_integer::Integer; -use num_traits::{Signed, Zero}; -use serde::{Deserialize, Serialize, Serializer}; - -use crate::bigint::bigint_ser; - -/// A quantity of native tokens. -/// A token amount is an integer, but has a human interpretation as a value with -/// 18 decimal places. -/// This is a new-type in order to prevent accidental conversion from other BigInts. -/// From/Into BigInt is missing by design. -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct TokenAmount { - atto: BigInt, -} - -// This type doesn't implement all the numeric traits (Num, Signed, etc), -// opting for a minimal useful set. Others can be added if needed. -impl TokenAmount { - /// The logical number of decimal places of a token unit. - pub const DECIMALS: usize = 18; - - /// The logical precision of a token unit. - pub const PRECISION: u64 = 10u64.pow(Self::DECIMALS as u32); - - /// Creates a token amount from a quantity of indivisible units (10^-18 whole units). - pub fn from_atto(atto: impl Into) -> Self { - Self { atto: atto.into() } - } - - /// Creates a token amount from nanoFIL. - pub fn from_nano(nano: impl Into) -> Self { - const NANO_PRECISION: u64 = 10u64.pow((TokenAmount::DECIMALS as u32) - 9); - Self { - atto: nano.into() * NANO_PRECISION, - } - } - - /// Creates a token amount from a quantity of whole units (10^18 indivisible units). - pub fn from_whole(tokens: impl Into) -> Self { - Self::from_atto(tokens.into() * Self::PRECISION) - } - - /// Returns the quantity of indivisible units. 
- pub fn atto(&self) -> &BigInt { - &self.atto - } - - pub fn is_zero(&self) -> bool { - self.atto.is_zero() - } - - pub fn is_positive(&self) -> bool { - self.atto.is_positive() - } - - pub fn is_negative(&self) -> bool { - self.atto.is_negative() - } -} - -impl Zero for TokenAmount { - #[inline] - fn zero() -> Self { - Self { - atto: BigInt::zero(), - } - } - - #[inline] - fn is_zero(&self) -> bool { - self.atto.is_zero() - } -} - -impl PartialOrd for TokenAmount { - #[inline] - fn partial_cmp(&self, other: &Self) -> Option { - self.atto.partial_cmp(&other.atto) - } -} - -impl Ord for TokenAmount { - #[inline] - fn cmp(&self, other: &Self) -> Ordering { - self.atto.cmp(&other.atto) - } -} - -impl Default for TokenAmount { - #[inline] - fn default() -> TokenAmount { - TokenAmount::zero() - } -} - -impl fmt::Debug for TokenAmount { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "TokenAmount({})", self) - } -} - -#[cfg(feature = "arb")] -impl quickcheck::Arbitrary for TokenAmount { - fn arbitrary(g: &mut quickcheck::Gen) -> Self { - TokenAmount::from_atto(BigInt::arbitrary(g)) - } -} - -/// Displays a token amount as a decimal in human units. -/// To avoid any confusion over whether the value is in human-scale or indivisible units, -/// the display always includes a decimal point. -impl fmt::Display for TokenAmount { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Implementation based on the bigdecimal library. - let (q, r) = self.atto.div_rem(&BigInt::from(Self::PRECISION)); - let before_decimal = q.abs().to_str_radix(10); - let after_decimal = if r.is_zero() { - "0".to_string() - } else { - let fraction_str = r.abs().to_str_radix(10); - let render = "0".repeat(Self::DECIMALS - fraction_str.len()) + fraction_str.as_str(); - render.trim_end_matches('0').to_string() - }; - - // Alter precision after the decimal point - let after_decimal = if let Some(precision) = f.precision() { - let len = after_decimal.len(); - if len < precision { - after_decimal + "0".repeat(precision - len).as_str() - } else { - after_decimal[0..precision].to_string() - } - } else { - after_decimal - }; - - // Always show the decimal point, even with ".0". - let complete_without_sign = before_decimal + "." + after_decimal.as_str(); - // Padding works even though we have a decimal point. - f.pad_integral(!self.atto().is_negative(), "", &complete_without_sign) - } -} - -impl Neg for TokenAmount { - type Output = TokenAmount; - - #[inline] - fn neg(self) -> TokenAmount { - TokenAmount { atto: -self.atto } - } -} - -impl<'a> Neg for &'a TokenAmount { - type Output = TokenAmount; - - #[inline] - fn neg(self) -> TokenAmount { - TokenAmount { - atto: (&self.atto).neg(), - } - } -} - -// Implements Add for all combinations of value/reference receiver and parameter. -// (Pattern copied from BigInt multiplication). -macro_rules! impl_add { - ($(impl<$($a:lifetime),*> Add<$Other:ty> for $Self:ty;)*) => {$( - impl<$($a),*> Add<$Other> for $Self { - type Output = TokenAmount; - - #[inline] - fn add(self, other: $Other) -> TokenAmount { - // automatically match value/ref - let TokenAmount { atto: x, .. } = self; - let TokenAmount { atto: y, .. } = other; - TokenAmount {atto: x + y} - } - } - )*} -} -impl_add! 
{ - impl<> Add for TokenAmount; - impl<'b> Add<&'b TokenAmount> for TokenAmount; - impl<'a> Add for &'a TokenAmount; - impl<'a, 'b> Add<&'b TokenAmount> for &'a TokenAmount; -} - -impl AddAssign for TokenAmount { - #[inline] - fn add_assign(&mut self, other: TokenAmount) { - self.atto += &other.atto; - } -} - -impl<'a> AddAssign<&'a TokenAmount> for TokenAmount { - #[inline] - fn add_assign(&mut self, other: &TokenAmount) { - self.atto += &other.atto; - } -} - -// Implements Sub for all combinations of value/reference receiver and parameter. -macro_rules! impl_sub { - ($(impl<$($a:lifetime),*> Sub<$Other:ty> for $Self:ty;)*) => {$( - impl<$($a),*> Sub<$Other> for $Self { - type Output = TokenAmount; - - #[inline] - fn sub(self, other: $Other) -> TokenAmount { - // automatically match value/ref - let TokenAmount { atto: x, .. } = self; - let TokenAmount { atto: y, .. } = other; - TokenAmount {atto: x - y} - } - } - )*} -} -impl_sub! { - impl<> Sub for TokenAmount; - impl<'b> Sub<&'b TokenAmount> for TokenAmount; - impl<'a> Sub for &'a TokenAmount; - impl<'a, 'b> Sub<&'b TokenAmount> for &'a TokenAmount; -} - -impl SubAssign for TokenAmount { - #[inline] - fn sub_assign(&mut self, other: TokenAmount) { - self.atto -= &other.atto; - } -} - -impl<'a> SubAssign<&'a TokenAmount> for TokenAmount { - #[inline] - fn sub_assign(&mut self, other: &TokenAmount) { - self.atto -= &other.atto; - } -} - -impl Mul for TokenAmount -where - BigInt: Mul, -{ - type Output = TokenAmount; - - fn mul(self, rhs: T) -> Self::Output { - TokenAmount { - atto: self.atto * rhs, - } - } -} - -impl<'a, T> Mul for &'a TokenAmount -where - &'a BigInt: Mul, -{ - type Output = TokenAmount; - - fn mul(self, rhs: T) -> Self::Output { - TokenAmount { - atto: &self.atto * rhs, - } - } -} - -macro_rules! impl_mul { - ($(impl<$($a:lifetime),*> Mul<$Other:ty> for $Self:ty;)*) => {$( - impl<$($a),*> Mul<$Other> for $Self { - type Output = TokenAmount; - - #[inline] - fn mul(self, other: $Other) -> TokenAmount { - other * self - } - } - )*} -} - -macro_rules! impl_muls { - ($($t:ty,)*) => {$( - impl_mul! { - impl<> Mul for $t; - impl<'b> Mul<&'b TokenAmount> for $t; - impl<'a> Mul for &'a $t; - impl<'a, 'b> Mul<&'b TokenAmount> for &'a $t; - } - )*}; -} - -impl_muls! { - u8, u16, u32, u64, u128, - i8, i16, i32, i64, i128, - BigInt, -} - -impl MulAssign for TokenAmount -where - BigInt: MulAssign, -{ - #[inline] - fn mul_assign(&mut self, other: T) { - self.atto *= other; - } -} - -// Only a single div/rem method is implemented, rather than the full Div and Rem traits. -// Division isn't a common operation with money-like units, and deserves to be treated carefully. 
-impl TokenAmount { - #[inline] - pub fn div_rem(&self, other: impl Into) -> (TokenAmount, TokenAmount) { - let (q, r) = self.atto.div_rem(&other.into()); - (TokenAmount { atto: q }, TokenAmount { atto: r }) - } - - #[inline] - pub fn div_ceil(&self, other: impl Into) -> TokenAmount { - TokenAmount { - atto: self.atto.div_ceil(&other.into()), - } - } - - #[inline] - pub fn div_floor(&self, other: impl Into) -> TokenAmount { - TokenAmount { - atto: self.atto.div_floor(&other.into()), - } - } -} - -impl Sum for TokenAmount { - fn sum>(iter: I) -> Self { - Self::from_atto(iter.map(|t| t.atto).sum::()) - } -} - -impl<'a> Sum<&'a TokenAmount> for TokenAmount { - fn sum>(iter: I) -> Self { - Self::from_atto(iter.map(|t| &t.atto).sum::()) - } -} - -// Serialisation - -impl Serialize for TokenAmount { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - bigint_ser::serialize(&self.atto, serializer) - } -} - -impl<'de> Deserialize<'de> for TokenAmount { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - bigint_ser::deserialize(deserializer).map(|v| TokenAmount { atto: v }) - } -} - -#[cfg(test)] -mod test { - use num_bigint::BigInt; - use num_traits::Zero; - - use crate::TokenAmount; - - fn whole(x: impl Into) -> TokenAmount { - TokenAmount::from_whole(x) - } - - fn atto(x: impl Into) -> TokenAmount { - TokenAmount::from_atto(x.into()) - } - - #[test] - fn display_basic() { - fn basic(expected: &str, t: TokenAmount) { - assert_eq!(expected, format!("{}", t)); - } - - basic("0.0", TokenAmount::zero()); - basic("0.000000000000000001", atto(1)); - basic("0.000000000000001", atto(1000)); - basic("0.1234", atto(123_400_000_000_000_000_u64)); - basic("0.10101", atto(101_010_000_000_000_000_u64)); - basic("1.0", whole(1)); - basic("1.0", atto(1_000_000_000_000_000_000_u128)); - basic("1.1", atto(1_100_000_000_000_000_000_u128)); - basic("1.000000000000000001", atto(1_000_000_000_000_000_001_u128)); - basic( - "1234.000000000123456789", - whole(1234) + atto(123_456_789_u64), - ); - } - - #[test] - fn display_precision() { - assert_eq!("0.0", format!("{:.1}", TokenAmount::zero())); - assert_eq!("0.000", format!("{:.3}", TokenAmount::zero())); - assert_eq!("0.000", format!("{:.3}", atto(1))); // Truncated. - assert_eq!( - "0.123", - format!("{:.3}", atto(123_456_789_000_000_000_u64)) // Truncated. - ); - assert_eq!( - "0.123456789000", - format!("{:.12}", atto(123_456_789_000_000_000_u64)) - ); - } - - #[test] - fn display_padding() { - assert_eq!("0.0", format!("{:01}", TokenAmount::zero())); - assert_eq!("0.0", format!("{:03}", TokenAmount::zero())); - assert_eq!("000.0", format!("{:05}", TokenAmount::zero())); - assert_eq!( - "0.123", - format!("{:01.3}", atto(123_456_789_000_000_000_u64)) - ); - assert_eq!( - "00.123", - format!("{:06.3}", atto(123_456_789_000_000_000_u64)) - ); - } - - #[test] - fn display_negative() { - assert_eq!("-0.000001", format!("{:01}", -TokenAmount::from_nano(1000))); - } - - #[test] - fn ops() { - // Test the basic operations are wired up correctly. 
- assert_eq!(atto(15), atto(10) + atto(5)); - assert_eq!(atto(3), atto(10) - atto(7)); - assert_eq!(atto(12), atto(3) * 4); - let (q, r) = atto(14).div_rem(4); - assert_eq!((atto(3), atto(2)), (q, r)); - - let mut a = atto(1); - a += atto(2); - assert_eq!(atto(3), a); - a *= 2; - assert_eq!(atto(6), a); - a -= atto(2); - assert_eq!(atto(4), a); - } - - #[test] - fn nano_fil() { - assert_eq!( - TokenAmount::from_nano(1), - TokenAmount::from_whole(1).div_floor(10u64.pow(9)) - ) - } - - #[test] - fn test_mul() { - let a = atto(2) * 3; - let b = 3 * atto(2); - assert_eq!(a, atto(6)); - assert_eq!(a, b); - } - - #[test] - fn test_sum() { - assert_eq!( - [1, 2, 3, 4].into_iter().map(atto).sum::(), - atto(10) - ); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/error/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/error/mod.rs deleted file mode 100644 index fb4a06a9a0b2..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/error/mod.rs +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -use std::fmt::Formatter; - -use num_derive::FromPrimitive; -use serde::{Deserialize, Serialize}; -use thiserror::Error; - -/// ExitCode defines the exit code from the VM invocation. -#[derive(PartialEq, Eq, Debug, Clone, Copy, Serialize, Deserialize)] -#[serde(transparent)] -#[repr(transparent)] -pub struct ExitCode { - value: u32, -} - -impl ExitCode { - pub const fn new(value: u32) -> Self { - Self { value } - } - - pub fn value(self) -> u32 { - self.value - } - - /// Returns true if the exit code indicates success. - pub fn is_success(self) -> bool { - self.value == 0 - } - - /// Returns true if the error code is in the range of exit codes reserved for the VM - /// (including Ok). - pub fn is_system_error(self) -> bool { - self.value < (Self::FIRST_USER_EXIT_CODE) - } -} - -impl From for ExitCode { - fn from(value: u32) -> Self { - ExitCode { value } - } -} - -impl std::fmt::Display for ExitCode { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.value) - } -} - -impl ExitCode { - // Exit codes which originate inside the VM. - // These values may not be used by actors when aborting. - - /// The code indicating successful execution. - pub const OK: ExitCode = ExitCode::new(0); - /// The message sender doesn't exist. - pub const SYS_SENDER_INVALID: ExitCode = ExitCode::new(1); - /// The message sender was not in a valid state to send this message. - /// - /// Either: - /// - The sender's nonce nonce didn't match the message nonce. - /// - The sender didn't have the funds to cover the message gas. - pub const SYS_SENDER_STATE_INVALID: ExitCode = ExitCode::new(2); - //pub const SYS_RESERVED_3 ExitCode = ExitCode::new(3); - /// The message receiver trapped (panicked). - pub const SYS_ILLEGAL_INSTRUCTION: ExitCode = ExitCode::new(4); - /// The message receiver doesn't exist and can't be automatically created - pub const SYS_INVALID_RECEIVER: ExitCode = ExitCode::new(5); - /// The message sender didn't have the requisite funds. - pub const SYS_INSUFFICIENT_FUNDS: ExitCode = ExitCode::new(6); - /// Message execution (including subcalls) used more gas than the specified limit. - pub const SYS_OUT_OF_GAS: ExitCode = ExitCode::new(7); - // pub const SYS_RESERVED_8: ExitCode = ExitCode::new(8); - /// The message receiver aborted with a reserved exit code. 
- pub const SYS_ILLEGAL_EXIT_CODE: ExitCode = ExitCode::new(9); - /// An internal VM assertion failed. - pub const SYS_ASSERTION_FAILED: ExitCode = ExitCode::new(10); - /// The actor returned a block handle that doesn't exist - pub const SYS_MISSING_RETURN: ExitCode = ExitCode::new(11); - // pub const SYS_RESERVED_12: ExitCode = ExitCode::new(12); - // pub const SYS_RESERVED_13: ExitCode = ExitCode::new(13); - // pub const SYS_RESERVED_14: ExitCode = ExitCode::new(14); - // pub const SYS_RESERVED_15: ExitCode = ExitCode::new(15); - - /// The lowest exit code that an actor may abort with. - pub const FIRST_USER_EXIT_CODE: u32 = 16; - - // Standard exit codes according to the built-in actors' calling convention. - /// The method parameters are invalid. - pub const USR_ILLEGAL_ARGUMENT: ExitCode = ExitCode::new(16); - /// The requested resource does not exist. - pub const USR_NOT_FOUND: ExitCode = ExitCode::new(17); - /// The requested operation is forbidden. - pub const USR_FORBIDDEN: ExitCode = ExitCode::new(18); - /// The actor has insufficient funds to perform the requested operation. - pub const USR_INSUFFICIENT_FUNDS: ExitCode = ExitCode::new(19); - /// The actor's internal state is invalid. - pub const USR_ILLEGAL_STATE: ExitCode = ExitCode::new(20); - /// There was a de/serialization failure within actor code. - pub const USR_SERIALIZATION: ExitCode = ExitCode::new(21); - /// The message cannot be handled (usually indicates an unhandled method number). - pub const USR_UNHANDLED_MESSAGE: ExitCode = ExitCode::new(22); - /// The actor failed with an unspecified error. - pub const USR_UNSPECIFIED: ExitCode = ExitCode::new(23); - /// The actor failed a user-level assertion. - pub const USR_ASSERTION_FAILED: ExitCode = ExitCode::new(24); - /// The requested operation cannot be performed in "read-only" mode. - pub const USR_READ_ONLY: ExitCode = ExitCode::new(25); - /// The method cannot handle a transfer of value. - pub const USR_NOT_PAYABLE: ExitCode = ExitCode::new(26); - // pub const RESERVED_27: ExitCode = ExitCode::new(27); - // pub const RESERVED_28: ExitCode = ExitCode::new(28); - // pub const RESERVED_29: ExitCode = ExitCode::new(29); - // pub const RESERVED_30: ExitCode = ExitCode::new(30); - // pub const RESERVED_31: ExitCode = ExitCode::new(31); -} - -/// When a syscall fails, it returns an `ErrorNumber` to indicate why. The syscalls themselves -/// include documentation on _which_ syscall errors they can be expected to return, and what they -/// mean in the context of the syscall. -#[non_exhaustive] -#[repr(u32)] -#[derive(Copy, Clone, Eq, Debug, PartialEq, Error, FromPrimitive)] -pub enum ErrorNumber { - /// A syscall parameters was invalid. - IllegalArgument = 1, - /// The actor is not in the correct state to perform the requested operation. - IllegalOperation = 2, - /// This syscall would exceed some system limit (memory, lookback, call depth, etc.). - LimitExceeded = 3, - /// A system-level assertion has failed. - /// - /// # Note - /// - /// Non-system actors should never receive this error number. A system-level assertion will - /// cause the entire message to fail. - AssertionFailed = 4, - /// There were insufficient funds to complete the requested operation. - InsufficientFunds = 5, - /// A resource was not found. - NotFound = 6, - /// The specified IPLD block handle was invalid. - InvalidHandle = 7, - /// The requested CID shape (multihash codec, multihash length) isn't supported. - IllegalCid = 8, - /// The requested IPLD codec isn't supported. 
- IllegalCodec = 9, - /// The IPLD block did not match the specified IPLD codec. - Serialization = 10, - /// The operation is forbidden. - Forbidden = 11, - /// The passed buffer is too small. - BufferTooSmall = 12, - /// The actor is executing in a read-only context. - ReadOnly = 13, -} - -impl std::fmt::Display for ErrorNumber { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - use ErrorNumber::*; - f.write_str(match *self { - IllegalArgument => "illegal argument", - IllegalOperation => "illegal operation", - LimitExceeded => "limit exceeded", - AssertionFailed => "filecoin assertion failed", - InsufficientFunds => "insufficient funds", - NotFound => "resource not found", - InvalidHandle => "invalid ipld block handle", - IllegalCid => "illegal cid specification", - IllegalCodec => "illegal ipld codec", - Serialization => "serialization error", - Forbidden => "operation forbidden", - BufferTooSmall => "buffer too small", - ReadOnly => "execution context is read-only", - }) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/event/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/event/mod.rs deleted file mode 100644 index a57d2097f296..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/event/mod.rs +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -use bitflags::bitflags; -use fvm_ipld_encoding::strict_bytes; -use serde::{Deserialize, Serialize}; -use serde_tuple::*; - -use crate::ActorID; - -/// Event with extra information stamped by the FVM. This is the structure that gets committed -/// on-chain via the receipt. -#[derive(Serialize_tuple, Deserialize_tuple, PartialEq, Eq, Clone, Debug)] -pub struct StampedEvent { - /// Carries the ID of the actor that emitted this event. - pub emitter: ActorID, - /// The event as emitted by the actor. - pub event: ActorEvent, -} - -impl StampedEvent { - pub fn new(emitter: ActorID, event: ActorEvent) -> Self { - Self { emitter, event } - } -} - -/// An event as originally emitted by the actor. -#[derive(Serialize_tuple, Deserialize_tuple, PartialEq, Eq, Clone, Debug)] -#[serde(transparent)] -pub struct ActorEvent { - pub entries: Vec, -} - -impl From> for ActorEvent { - fn from(entries: Vec) -> Self { - Self { entries } - } -} - -bitflags! { - /// Flags associated with an Event entry. - #[derive(Deserialize, Serialize, Copy, Clone, Eq, PartialEq, Debug)] - #[serde(transparent)] - pub struct Flags: u64 { - const FLAG_INDEXED_KEY = 0b00000001; - const FLAG_INDEXED_VALUE = 0b00000010; - const FLAG_INDEXED_ALL = Self::FLAG_INDEXED_KEY.bits() | Self::FLAG_INDEXED_VALUE.bits(); - } -} - -/// A key value entry inside an Event. -#[derive(Serialize_tuple, Deserialize_tuple, PartialEq, Eq, Clone, Debug)] -pub struct Entry { - /// A bitmap conveying metadata or hints about this entry. - pub flags: Flags, - /// The key of this event. - pub key: String, - /// The value's codec. Must be IPLD_RAW (0x55) for now according to FIP-0049. - pub codec: u64, - /// The event's value. 
- #[serde(with = "strict_bytes")] - pub value: Vec, -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/lib.rs deleted file mode 100644 index 765281597afa..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/lib.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -#[macro_use] -extern crate lazy_static; - -use address::Address; -use clock::ChainEpoch; - -pub mod address; -pub mod bigint; -pub mod chainid; -pub mod clock; -pub mod commcid; -pub mod consensus; -pub mod crypto; -pub mod deal; -pub mod econ; -pub mod error; -pub mod event; -pub mod math; -pub mod message; -pub mod piece; -pub mod randomness; -pub mod receipt; -pub mod reward; -pub mod sector; -pub mod smooth; -pub mod state; -pub mod sys; -pub mod version; - -use econ::TokenAmount; -use fvm_ipld_encoding::ipld_block::IpldBlock; - -use crate::error::ExitCode; - -lazy_static! { - /// Total Filecoin available to the network. - pub static ref TOTAL_FILECOIN: TokenAmount = TokenAmount::from_whole(TOTAL_FILECOIN_BASE); - - /// Zero address used to avoid allowing it to be used for verification. - /// This is intentionally disallowed because it is an edge case with Filecoin's BLS - /// signature verification. - pub static ref ZERO_ADDRESS: Address = address::Network::Mainnet.parse_address("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a").unwrap(); -} - -/// Codec for raw data. -pub const IPLD_RAW: u64 = 0x55; - -/// Multihash code for the identity hash function. -pub const IDENTITY_HASH: u64 = 0x0; - -/// The maximum supported CID size. -pub const MAX_CID_LEN: usize = 100; - -/// Identifier for Actors, includes builtin and initialized actors -pub type ActorID = u64; - -/// Default bit width for the hamt in the filecoin protocol. -pub const HAMT_BIT_WIDTH: u32 = 5; -/// Total gas limit allowed per block. This is shared across networks. -pub const BLOCK_GAS_LIMIT: u64 = 10_000_000_000; -/// Total Filecoin supply. -pub const TOTAL_FILECOIN_BASE: i64 = 2_000_000_000; - -// Epochs -/// Lookback height for retrieving ticket randomness. -pub const TICKET_RANDOMNESS_LOOKBACK: ChainEpoch = 1; -/// Epochs to look back for verifying PoSt proofs. -pub const WINNING_POST_SECTOR_SET_LOOKBACK: ChainEpoch = 10; - -/// The expected number of block producers in each epoch. -pub const BLOCKS_PER_EPOCH: u64 = 5; - -/// Allowable clock drift in validations. -pub const ALLOWABLE_CLOCK_DRIFT: u64 = 1; - -/// Config trait which handles different network configurations. -pub trait NetworkParams { - /// Total filecoin available to network. - const TOTAL_FILECOIN: i64; - - /// Available rewards for mining. - const MINING_REWARD_TOTAL: i64; - - /// Initial reward actor balance. This function is only called in genesis setting up state. - fn initial_reward_balance() -> TokenAmount { - TokenAmount::from_whole(Self::MINING_REWARD_TOTAL) - } -} - -/// Params for the network. This is now continued on into mainnet and is static across networks. -// * This can be removed in the future if the new testnet is configred at build time -// * but the reason to keep as is, is for an easier transition to runtime configuration. 
-pub struct DefaultNetworkParams; - -impl NetworkParams for DefaultNetworkParams { - const TOTAL_FILECOIN: i64 = TOTAL_FILECOIN_BASE; - const MINING_REWARD_TOTAL: i64 = 1_400_000_000; -} - -/// Method number indicator for calling actor methods. -pub type MethodNum = u64; - -/// Base actor send method. -pub const METHOD_SEND: MethodNum = 0; -/// Base actor constructor method. -pub const METHOD_CONSTRUCTOR: MethodNum = 1; - -/// The outcome of a `Send`, covering its ExitCode and optional return data -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct Response { - pub exit_code: ExitCode, - pub return_data: Option, -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/registered_proof.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/registered_proof.rs deleted file mode 100644 index 2535c19b55ea..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/registered_proof.rs +++ /dev/null @@ -1,589 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -#[cfg(feature = "proofs")] -use std::convert::TryFrom; - -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -use super::SectorSize; -use crate::clock; -use crate::version::NetworkVersion; - -/// Seal proof type which defines the version and sector size. -#[allow(non_camel_case_types)] -#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] -pub enum RegisteredSealProof { - StackedDRG2KiBV1, - StackedDRG512MiBV1, - StackedDRG8MiBV1, - StackedDRG32GiBV1, - StackedDRG64GiBV1, - - StackedDRG2KiBV1P1, - StackedDRG512MiBV1P1, - StackedDRG8MiBV1P1, - StackedDRG32GiBV1P1, - StackedDRG64GiBV1P1, - - StackedDRG2KiBV1P1_Feat_SyntheticPoRep, - StackedDRG512MiBV1P1_Feat_SyntheticPoRep, - StackedDRG8MiBV1P1_Feat_SyntheticPoRep, - StackedDRG32GiBV1P1_Feat_SyntheticPoRep, - StackedDRG64GiBV1P1_Feat_SyntheticPoRep, - // TODO: get rid of this option once we no longer need go compat. - // We use it to ensure that we can deserialize bad values here because go checks this value - // later. - Invalid(i64), -} - -impl RegisteredSealProof { - /// Returns registered seal proof for given sector size - pub fn from_sector_size(size: SectorSize, network_version: NetworkVersion) -> Self { - if network_version < NetworkVersion::V7 { - match size { - SectorSize::_2KiB => Self::StackedDRG2KiBV1, - SectorSize::_8MiB => Self::StackedDRG8MiBV1, - SectorSize::_512MiB => Self::StackedDRG512MiBV1, - SectorSize::_32GiB => Self::StackedDRG32GiBV1, - SectorSize::_64GiB => Self::StackedDRG64GiBV1, - } - } else { - match size { - SectorSize::_2KiB => Self::StackedDRG2KiBV1P1, - SectorSize::_8MiB => Self::StackedDRG8MiBV1P1, - SectorSize::_512MiB => Self::StackedDRG512MiBV1P1, - SectorSize::_32GiB => Self::StackedDRG32GiBV1P1, - SectorSize::_64GiB => Self::StackedDRG64GiBV1P1, - } - } - } - - /// Convert the original proof type to the v1 proof added in network version 7. - pub fn update_to_v1(&mut self) { - *self = match self { - Self::StackedDRG2KiBV1 => Self::StackedDRG2KiBV1P1, - Self::StackedDRG512MiBV1 => Self::StackedDRG512MiBV1P1, - Self::StackedDRG8MiBV1 => Self::StackedDRG8MiBV1P1, - Self::StackedDRG32GiBV1 => Self::StackedDRG32GiBV1P1, - Self::StackedDRG64GiBV1 => Self::StackedDRG64GiBV1P1, - _ => return, - }; - } - - #[deprecated(since = "0.1.10", note = "Logic should exist in actors")] - /// The maximum duration a sector sealed with this proof may exist between activation and expiration. 
- pub fn sector_maximum_lifetime(self) -> clock::ChainEpoch { - // For all Stacked DRG sectors, the max is 5 years - let epochs_per_year = 1_262_277; - 5 * epochs_per_year - } - - /// Proof size for each SealProof type - pub fn proof_size(self) -> Result { - use RegisteredSealProof::*; - match self { - StackedDRG2KiBV1 - | StackedDRG512MiBV1 - | StackedDRG8MiBV1 - | StackedDRG2KiBV1P1 - | StackedDRG512MiBV1P1 - | StackedDRG8MiBV1P1 - | StackedDRG2KiBV1P1_Feat_SyntheticPoRep - | StackedDRG512MiBV1P1_Feat_SyntheticPoRep - | StackedDRG8MiBV1P1_Feat_SyntheticPoRep => Ok(192), - - StackedDRG32GiBV1 - | StackedDRG64GiBV1 - | StackedDRG32GiBV1P1 - | StackedDRG64GiBV1P1 - | StackedDRG32GiBV1P1_Feat_SyntheticPoRep - | StackedDRG64GiBV1P1_Feat_SyntheticPoRep => Ok(1920), - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } -} - -impl Default for RegisteredSealProof { - fn default() -> Self { - Self::Invalid(-1) - } -} - -/// Proof of spacetime type, indicating version and sector size of the proof. -#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] -#[cfg_attr(feature = "arb", derive(arbitrary::Arbitrary))] -pub enum RegisteredPoStProof { - StackedDRGWinning2KiBV1, - StackedDRGWinning8MiBV1, - StackedDRGWinning512MiBV1, - StackedDRGWinning32GiBV1, - StackedDRGWinning64GiBV1, - StackedDRGWindow2KiBV1, - StackedDRGWindow8MiBV1, - StackedDRGWindow512MiBV1, - StackedDRGWindow32GiBV1, - StackedDRGWindow64GiBV1, - StackedDRGWindow2KiBV1P1, - StackedDRGWindow8MiBV1P1, - StackedDRGWindow512MiBV1P1, - StackedDRGWindow32GiBV1P1, - StackedDRGWindow64GiBV1P1, - Invalid(i64), -} - -impl RegisteredPoStProof { - /// Returns the sector size of the proof type, which is measured in bytes. - pub fn sector_size(self) -> Result { - use RegisteredPoStProof::*; - match self { - StackedDRGWindow2KiBV1P1 | StackedDRGWindow2KiBV1 | StackedDRGWinning2KiBV1 => { - Ok(SectorSize::_2KiB) - } - StackedDRGWindow8MiBV1P1 | StackedDRGWindow8MiBV1 | StackedDRGWinning8MiBV1 => { - Ok(SectorSize::_8MiB) - } - StackedDRGWindow512MiBV1P1 | StackedDRGWindow512MiBV1 | StackedDRGWinning512MiBV1 => { - Ok(SectorSize::_512MiB) - } - StackedDRGWindow32GiBV1P1 | StackedDRGWindow32GiBV1 | StackedDRGWinning32GiBV1 => { - Ok(SectorSize::_32GiB) - } - StackedDRGWindow64GiBV1P1 | StackedDRGWindow64GiBV1 | StackedDRGWinning64GiBV1 => { - Ok(SectorSize::_64GiB) - } - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } - - /// Proof size for each PoStProof type - pub fn proof_size(self) -> Result { - use RegisteredPoStProof::*; - match self { - StackedDRGWinning2KiBV1 - | StackedDRGWinning8MiBV1 - | StackedDRGWinning512MiBV1 - | StackedDRGWinning32GiBV1 - | StackedDRGWinning64GiBV1 - | StackedDRGWindow2KiBV1 - | StackedDRGWindow8MiBV1 - | StackedDRGWindow512MiBV1 - | StackedDRGWindow32GiBV1 - | StackedDRGWindow64GiBV1 - | StackedDRGWindow2KiBV1P1 - | StackedDRGWindow8MiBV1P1 - | StackedDRGWindow512MiBV1P1 - | StackedDRGWindow32GiBV1P1 - | StackedDRGWindow64GiBV1P1 => Ok(192), - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } - /// Returns the partition size, in sectors, associated with a proof type. - /// The partition size is the number of sectors proven in a single PoSt proof. - pub fn window_post_partitions_sector(self) -> Result { - // Resolve to post proof and then compute size from that. 
- use RegisteredPoStProof::*; - match self { - StackedDRGWinning64GiBV1 | StackedDRGWindow64GiBV1 | StackedDRGWindow64GiBV1P1 => { - Ok(2300) - } - StackedDRGWinning32GiBV1 | StackedDRGWindow32GiBV1 | StackedDRGWindow32GiBV1P1 => { - Ok(2349) - } - StackedDRGWinning2KiBV1 | StackedDRGWindow2KiBV1 | StackedDRGWindow2KiBV1P1 => Ok(2), - StackedDRGWinning8MiBV1 | StackedDRGWindow8MiBV1 | StackedDRGWindow8MiBV1P1 => Ok(2), - StackedDRGWinning512MiBV1 | StackedDRGWindow512MiBV1 | StackedDRGWindow512MiBV1P1 => { - Ok(2) - } - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } -} - -impl RegisteredSealProof { - /// Returns the sector size of the proof type, which is measured in bytes. - pub fn sector_size(self) -> Result { - use RegisteredSealProof::*; - match self { - StackedDRG2KiBV1 | StackedDRG2KiBV1P1 | StackedDRG2KiBV1P1_Feat_SyntheticPoRep => { - Ok(SectorSize::_2KiB) - } - StackedDRG8MiBV1 | StackedDRG8MiBV1P1 | StackedDRG8MiBV1P1_Feat_SyntheticPoRep => { - Ok(SectorSize::_8MiB) - } - StackedDRG512MiBV1 - | StackedDRG512MiBV1P1 - | StackedDRG512MiBV1P1_Feat_SyntheticPoRep => Ok(SectorSize::_512MiB), - StackedDRG32GiBV1 | StackedDRG32GiBV1P1 | StackedDRG32GiBV1P1_Feat_SyntheticPoRep => { - Ok(SectorSize::_32GiB) - } - StackedDRG64GiBV1 | StackedDRG64GiBV1P1 | StackedDRG64GiBV1P1_Feat_SyntheticPoRep => { - Ok(SectorSize::_64GiB) - } - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } - - /// Returns the partition size, in sectors, associated with a proof type. - /// The partition size is the number of sectors proven in a single PoSt proof. - pub fn window_post_partitions_sector(self) -> Result { - // Resolve to seal proof and then compute size from that. - use RegisteredSealProof::*; - match self { - StackedDRG64GiBV1 | StackedDRG64GiBV1P1 | StackedDRG64GiBV1P1_Feat_SyntheticPoRep => { - Ok(2300) - } - StackedDRG32GiBV1 | StackedDRG32GiBV1P1 | StackedDRG32GiBV1P1_Feat_SyntheticPoRep => { - Ok(2349) - } - StackedDRG2KiBV1 | StackedDRG2KiBV1P1 | StackedDRG2KiBV1P1_Feat_SyntheticPoRep => Ok(2), - StackedDRG8MiBV1 | StackedDRG8MiBV1P1 | StackedDRG8MiBV1P1_Feat_SyntheticPoRep => Ok(2), - StackedDRG512MiBV1 - | StackedDRG512MiBV1P1 - | StackedDRG512MiBV1P1_Feat_SyntheticPoRep => Ok(2), - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } - - /// Produces the winning PoSt-specific RegisteredProof corresponding - /// to the receiving RegisteredProof. - pub fn registered_winning_post_proof(self) -> Result { - use RegisteredPoStProof::*; - match self { - Self::StackedDRG64GiBV1 - | Self::StackedDRG64GiBV1P1 - | Self::StackedDRG64GiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWinning64GiBV1), - Self::StackedDRG32GiBV1 - | Self::StackedDRG32GiBV1P1 - | Self::StackedDRG32GiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWinning32GiBV1), - Self::StackedDRG2KiBV1 - | Self::StackedDRG2KiBV1P1 - | Self::StackedDRG2KiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWinning2KiBV1), - Self::StackedDRG8MiBV1 - | Self::StackedDRG8MiBV1P1 - | Self::StackedDRG8MiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWinning8MiBV1), - Self::StackedDRG512MiBV1 - | Self::StackedDRG512MiBV1P1 - | Self::StackedDRG512MiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWinning512MiBV1), - Self::Invalid(_) => Err(format!( - "Unsupported mapping from {:?} to PoSt-winning RegisteredProof", - self - )), - } - } - - /// Produces the windowed PoSt-specific RegisteredProof corresponding - /// to the receiving RegisteredProof. 
- pub fn registered_window_post_proof(self) -> Result { - use RegisteredPoStProof::*; - match self { - Self::StackedDRG64GiBV1 - | Self::StackedDRG64GiBV1P1 - | Self::StackedDRG64GiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWindow64GiBV1P1), - Self::StackedDRG32GiBV1 - | Self::StackedDRG32GiBV1P1 - | Self::StackedDRG32GiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWindow32GiBV1P1), - Self::StackedDRG2KiBV1 - | Self::StackedDRG2KiBV1P1 - | Self::StackedDRG2KiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWindow2KiBV1P1), - Self::StackedDRG8MiBV1 - | Self::StackedDRG8MiBV1P1 - | Self::StackedDRG8MiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWindow8MiBV1P1), - Self::StackedDRG512MiBV1 - | Self::StackedDRG512MiBV1P1 - | Self::StackedDRG512MiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRGWindow512MiBV1P1), - Self::Invalid(_) => Err(format!( - "Unsupported mapping from {:?} to PoSt-window RegisteredProof", - self - )), - } - } - - /// Produces the update RegisteredProof corresponding to the receiving RegisteredProof. - pub fn registered_update_proof(self) -> Result { - use RegisteredUpdateProof::*; - match self { - Self::StackedDRG64GiBV1 - | Self::StackedDRG64GiBV1P1 - | Self::StackedDRG64GiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRG64GiBV1), - Self::StackedDRG32GiBV1 - | Self::StackedDRG32GiBV1P1 - | Self::StackedDRG32GiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRG32GiBV1), - Self::StackedDRG2KiBV1 - | Self::StackedDRG2KiBV1P1 - | Self::StackedDRG2KiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRG2KiBV1), - Self::StackedDRG8MiBV1 - | Self::StackedDRG8MiBV1P1 - | Self::StackedDRG8MiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRG8MiBV1), - Self::StackedDRG512MiBV1 - | Self::StackedDRG512MiBV1P1 - | Self::StackedDRG512MiBV1P1_Feat_SyntheticPoRep => Ok(StackedDRG512MiBV1), - Self::Invalid(_) => Err(format!( - "Unsupported mapping from {:?} to Update RegisteredProof", - self - )), - } - } -} - -/// Seal proof type which defines the version and sector size. -#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] -pub enum RegisteredAggregateProof { - SnarkPackV1, - SnarkPackV2, - Invalid(i64), -} - -/// Proof of update type -#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] -pub enum RegisteredUpdateProof { - StackedDRG2KiBV1, - StackedDRG8MiBV1, - StackedDRG512MiBV1, - StackedDRG32GiBV1, - StackedDRG64GiBV1, - Invalid(i64), -} - -macro_rules! i64_conversion { - ($ty:ident; $( $var:ident => $val:expr, )*) => { - impl From for $ty { - fn from(value: i64) -> Self { - match value { - $( $val => $ty::$var, )* - other => $ty::Invalid(other), - } - } - } - impl From<$ty> for i64 { - fn from(proof: $ty) -> Self { - match proof { - $( $ty::$var => $val, )* - $ty::Invalid(other) => other, - } - } - } - } -} - -i64_conversion! { - RegisteredPoStProof; - StackedDRGWinning2KiBV1 => 0, - StackedDRGWinning8MiBV1 => 1, - StackedDRGWinning512MiBV1 => 2, - StackedDRGWinning32GiBV1 => 3, - StackedDRGWinning64GiBV1 => 4, - StackedDRGWindow2KiBV1 => 5, - StackedDRGWindow8MiBV1 => 6, - StackedDRGWindow512MiBV1 => 7, - StackedDRGWindow32GiBV1 => 8, - StackedDRGWindow64GiBV1 => 9, - StackedDRGWindow2KiBV1P1 => 10, - StackedDRGWindow8MiBV1P1 => 11, - StackedDRGWindow512MiBV1P1 => 12, - StackedDRGWindow32GiBV1P1 => 13, - StackedDRGWindow64GiBV1P1 => 14, -} - -i64_conversion! 
{ - RegisteredSealProof; - StackedDRG2KiBV1 => 0, - StackedDRG8MiBV1 => 1, - StackedDRG512MiBV1 => 2, - StackedDRG32GiBV1 => 3, - StackedDRG64GiBV1 => 4, - - StackedDRG2KiBV1P1 => 5, - StackedDRG8MiBV1P1 => 6, - StackedDRG512MiBV1P1 => 7, - StackedDRG32GiBV1P1 => 8, - StackedDRG64GiBV1P1 => 9, - - StackedDRG2KiBV1P1_Feat_SyntheticPoRep => 10, - StackedDRG8MiBV1P1_Feat_SyntheticPoRep => 11, - StackedDRG512MiBV1P1_Feat_SyntheticPoRep => 12, - StackedDRG32GiBV1P1_Feat_SyntheticPoRep => 13, - StackedDRG64GiBV1P1_Feat_SyntheticPoRep => 14, -} - -i64_conversion! { - RegisteredAggregateProof; - SnarkPackV1 => 0, - SnarkPackV2 => 1, -} - -i64_conversion! { - RegisteredUpdateProof; - StackedDRG2KiBV1 => 0, - StackedDRG8MiBV1 => 1, - StackedDRG512MiBV1 => 2, - StackedDRG32GiBV1 => 3, - StackedDRG64GiBV1 => 4, -} -#[cfg(feature = "proofs")] -impl TryFrom for filecoin_proofs_api::RegisteredAggregationProof { - type Error = String; - fn try_from(p: RegisteredAggregateProof) -> Result { - use RegisteredAggregateProof::*; - match p { - SnarkPackV1 => Ok(Self::SnarkPackV1), - SnarkPackV2 => Ok(Self::SnarkPackV2), - Invalid(i) => Err(format!("unsupported aggregate proof type: {}", i)), - } - } -} - -#[cfg(feature = "proofs")] -impl TryFrom for filecoin_proofs_api::RegisteredSealProof { - type Error = String; - fn try_from(p: RegisteredSealProof) -> Result { - use RegisteredSealProof::*; - match p { - StackedDRG64GiBV1 => Ok(Self::StackedDrg64GiBV1), - StackedDRG32GiBV1 => Ok(Self::StackedDrg32GiBV1), - StackedDRG2KiBV1 => Ok(Self::StackedDrg2KiBV1), - StackedDRG8MiBV1 => Ok(Self::StackedDrg8MiBV1), - StackedDRG512MiBV1 => Ok(Self::StackedDrg512MiBV1), - StackedDRG64GiBV1P1 => Ok(Self::StackedDrg64GiBV1_1), - StackedDRG32GiBV1P1 => Ok(Self::StackedDrg32GiBV1_1), - StackedDRG2KiBV1P1 => Ok(Self::StackedDrg2KiBV1_1), - StackedDRG8MiBV1P1 => Ok(Self::StackedDrg8MiBV1_1), - StackedDRG512MiBV1P1 => Ok(Self::StackedDrg512MiBV1_1), - StackedDRG64GiBV1P1_Feat_SyntheticPoRep => { - Ok(Self::StackedDrg64GiBV1_1_Feat_SyntheticPoRep) - } - StackedDRG32GiBV1P1_Feat_SyntheticPoRep => { - Ok(Self::StackedDrg32GiBV1_1_Feat_SyntheticPoRep) - } - StackedDRG2KiBV1P1_Feat_SyntheticPoRep => { - Ok(Self::StackedDrg2KiBV1_1_Feat_SyntheticPoRep) - } - StackedDRG8MiBV1P1_Feat_SyntheticPoRep => { - Ok(Self::StackedDrg8MiBV1_1_Feat_SyntheticPoRep) - } - StackedDRG512MiBV1P1_Feat_SyntheticPoRep => { - Ok(Self::StackedDrg512MiBV1_1_Feat_SyntheticPoRep) - } - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } -} - -#[cfg(feature = "proofs")] -impl TryFrom for filecoin_proofs_api::RegisteredPoStProof { - type Error = String; - fn try_from(p: RegisteredPoStProof) -> Result { - use RegisteredPoStProof::*; - match p { - StackedDRGWinning2KiBV1 => Ok(Self::StackedDrgWinning2KiBV1), - StackedDRGWinning8MiBV1 => Ok(Self::StackedDrgWinning8MiBV1), - StackedDRGWinning512MiBV1 => Ok(Self::StackedDrgWinning512MiBV1), - StackedDRGWinning32GiBV1 => Ok(Self::StackedDrgWinning32GiBV1), - StackedDRGWinning64GiBV1 => Ok(Self::StackedDrgWinning64GiBV1), - StackedDRGWindow2KiBV1 => Ok(Self::StackedDrgWindow2KiBV1), - StackedDRGWindow8MiBV1 => Ok(Self::StackedDrgWindow8MiBV1), - StackedDRGWindow512MiBV1 => Ok(Self::StackedDrgWindow512MiBV1), - StackedDRGWindow32GiBV1 => Ok(Self::StackedDrgWindow32GiBV1), - StackedDRGWindow64GiBV1 => Ok(Self::StackedDrgWindow64GiBV1), - StackedDRGWindow2KiBV1P1 => Ok(Self::StackedDrgWindow2KiBV1_2), - StackedDRGWindow8MiBV1P1 => Ok(Self::StackedDrgWindow8MiBV1_2), - StackedDRGWindow512MiBV1P1 => 
Ok(Self::StackedDrgWindow512MiBV1_2), - StackedDRGWindow32GiBV1P1 => Ok(Self::StackedDrgWindow32GiBV1_2), - StackedDRGWindow64GiBV1P1 => Ok(Self::StackedDrgWindow64GiBV1_2), - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } -} - -#[cfg(feature = "proofs")] -impl TryFrom for filecoin_proofs_api::RegisteredUpdateProof { - type Error = String; - fn try_from(p: RegisteredUpdateProof) -> Result { - use RegisteredUpdateProof::*; - match p { - StackedDRG2KiBV1 => Ok(Self::StackedDrg2KiBV1), - StackedDRG8MiBV1 => Ok(Self::StackedDrg8MiBV1), - StackedDRG512MiBV1 => Ok(Self::StackedDrg512MiBV1), - StackedDRG32GiBV1 => Ok(Self::StackedDrg32GiBV1), - StackedDRG64GiBV1 => Ok(Self::StackedDrg64GiBV1), - Invalid(i) => Err(format!("unsupported proof type: {}", i)), - } - } -} - -impl Serialize for RegisteredPoStProof { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - i64::from(*self).serialize(serializer) - } -} - -impl<'de> Deserialize<'de> for RegisteredPoStProof { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let val = i64::deserialize(deserializer)?; - Ok(Self::from(val)) - } -} - -impl Serialize for RegisteredSealProof { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - i64::from(*self).serialize(serializer) - } -} - -impl<'de> Deserialize<'de> for RegisteredSealProof { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let val = i64::deserialize(deserializer)?; - Ok(Self::from(val)) - } -} - -impl Serialize for RegisteredAggregateProof { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - i64::from(*self).serialize(serializer) - } -} - -impl<'de> Deserialize<'de> for RegisteredAggregateProof { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let val = i64::deserialize(deserializer)?; - Ok(Self::from(val)) - } -} - -impl Serialize for RegisteredUpdateProof { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - i64::from(*self).serialize(serializer) - } -} - -impl<'de> Deserialize<'de> for RegisteredUpdateProof { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let val = i64::deserialize(deserializer)?; - Ok(Self::from(val)) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/state/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/state/mod.rs deleted file mode 100644 index 81bfb885224e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/state/mod.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -use cid::Cid; -use fvm_ipld_encoding::repr::*; -use fvm_ipld_encoding::tuple::*; -use serde::{Deserialize, Serialize}; - -/// Specifies the version of the state tree -#[derive(Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Serialize_repr, Deserialize_repr)] -#[repr(u64)] -pub enum StateTreeVersion { - /// Corresponds to actors < v2 - V0, - /// Corresponds to actors = v2 - V1, - /// Corresponds to actors = v3 - V2, - /// Corresponds to actors = v4 - V3, - /// Corresponds to actors >= v5 - V4, - /// Corresponding to actors >= v10 - V5, -} - -/// State root information. Contains information about the version of the state tree, -/// the root of the tree, and a link to the information about the tree. 
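Editorial aside (not part of the patch), stepping back to the proof registries above: the `i64_conversion!` tables plus the `Serialize`/`Deserialize` impls mean these enums travel as bare integers, with unknown IDs preserved via `Invalid`. A hedged sketch; `demo` is an illustrative helper name.

```rust
// Hedged sketch, assuming the 4.5.1 vendor keeps the same integer registry.
use fvm_shared::sector::RegisteredSealProof;

fn demo() {
    let proof = RegisteredSealProof::from(8); // StackedDRG32GiBV1P1 per the table above
    assert_eq!(i64::from(proof), 8);
    // Unknown IDs round-trip through the Invalid variant instead of failing.
    assert!(matches!(
        RegisteredSealProof::from(99),
        RegisteredSealProof::Invalid(99)
    ));
    // The serde impls write the same integer, so serde_json renders this as just `8`.
}
```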
-#[derive(Deserialize_tuple, Serialize_tuple)] -pub struct StateRoot { - /// State tree version - pub version: StateTreeVersion, - - /// Actors tree. The structure depends on the state root version. - pub actors: Cid, - - /// Info. The structure depends on the state root version. - pub info: Cid, -} - -/// Empty state tree information. This is serialized as an array for future proofing. -#[derive(Default, Deserialize, Serialize)] -#[serde(transparent)] -pub struct StateInfo0([(); 0]); diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sys/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sys/mod.rs deleted file mode 100644 index 2517c0bcd227..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sys/mod.rs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// SPDX-License-Identifier: Apache-2.0, MIT -//! This module contains types exchanged at the syscall layer between actors -//! (usually through the SDK) and the FVM. - -use bitflags::bitflags; -use num_bigint::TryFromBigIntError; - -pub mod out; - -pub type BlockId = u32; -pub type Codec = u64; - -/// The token amount type used in syscalls. It can represent any token amount (in atto-FIL) from 0 -/// to `2^128-1` attoFIL. Or 0 to about 340 exaFIL. -/// -/// Internally, this type is a tuple of `u64`s storing the "low" and "high" bits of a little-endian -/// u128. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[repr(packed, C)] -pub struct TokenAmount { - pub lo: u64, - pub hi: u64, -} - -impl From for crate::econ::TokenAmount { - fn from(v: TokenAmount) -> Self { - crate::econ::TokenAmount::from_atto((v.hi as u128) << 64 | (v.lo as u128)) - } -} - -impl TryFrom for TokenAmount { - type Error = TryFromBigIntError<()>; - fn try_from(v: crate::econ::TokenAmount) -> Result { - v.atto().try_into().map(|v: u128| Self { - hi: (v >> u64::BITS) as u64, - lo: v as u64, - }) - } -} - -impl<'a> TryFrom<&'a crate::econ::TokenAmount> for TokenAmount { - type Error = TryFromBigIntError<()>; - fn try_from(v: &'a crate::econ::TokenAmount) -> Result { - v.atto().try_into().map(|v: u128| Self { - hi: (v >> u64::BITS) as u64, - lo: v as u64, - }) - } -} - -bitflags! { - /// Flags passed to the send syscall. - #[derive(Default, Copy, Clone, Eq, PartialEq, Debug)] - #[repr(transparent)] - // note: this is 64 bits because I don't want to hate my past self, not because we need them - // right now. It doesn't really cost anything anyways. - pub struct SendFlags: u64 { - /// Send in "read-only" mode. - const READ_ONLY = 0b00000001; - } -} - -impl SendFlags { - pub fn read_only(self) -> bool { - self.intersects(Self::READ_ONLY) - } -} - -/// An unsafe trait to mark "syscall safe" types. These types must be safe to memcpy to and from -/// WASM. This means: -/// -/// 1. Repr C & packed alignment (no reordering, no padding). -/// 2. Copy, Sized, and no pointers. -/// 3. No floats (non-determinism). -/// -/// # Safety -/// -/// Incorrectly implementing this could lead to undefined behavior in types passed between wasm and -/// rust. -pub unsafe trait SyscallSafe: Copy + Sized + 'static {} - -macro_rules! assert_syscall_safe { - ($($t:ty,)*) => { - $(unsafe impl SyscallSafe for $t {})* - } -} - -assert_syscall_safe! 
{ - (), - - u8, u16, u32, u64, - i8, i16, i32, i64, - - TokenAmount, - out::ipld::IpldOpen, - out::ipld::IpldStat, - out::send::Send, - out::crypto::VerifyConsensusFault, - out::network::NetworkContext, - out::vm::MessageContext, -} - -unsafe impl SyscallSafe for [T; N] where T: SyscallSafe {} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/version/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/version/mod.rs deleted file mode 100644 index 439d97cfeb27..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/version/mod.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2021-2023 Protocol Labs -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -use std::fmt::Display; - -use serde::{Deserialize, Serialize}; - -/// Specifies the network version -#[derive(Debug, Eq, PartialEq, Clone, Copy, Ord, PartialOrd, Serialize, Deserialize)] -#[repr(transparent)] -#[serde(transparent)] -pub struct NetworkVersion(u32); - -impl NetworkVersion { - /// genesis (specs-actors v0.9.3) - pub const V0: Self = Self(0); - /// breeze (specs-actors v0.9.7) - pub const V1: Self = Self(1); - /// smoke (specs-actors v0.9.8) - pub const V2: Self = Self(2); - /// ignition (specs-actors v0.9.11) - pub const V3: Self = Self(3); - /// actors v2 (specs-actors v2.0.x) - pub const V4: Self = Self(4); - /// tape (increases max prove commit size by 10x) - pub const V5: Self = Self(5); - /// kumquat (specs-actors v2.2.0) - pub const V6: Self = Self(6); - /// calico (specs-actors v2.3.2) - pub const V7: Self = Self(7); - /// persian (post-2.3.2 behaviour transition) - pub const V8: Self = Self(8); - /// orange - pub const V9: Self = Self(9); - /// trust (specs-actors v3.0.x) - pub const V10: Self = Self(10); - /// norwegian (specs-actors v3.1.x) - pub const V11: Self = Self(11); - /// turbo (specs-actors v4.0.x) - pub const V12: Self = Self(12); - /// HyperDrive - pub const V13: Self = Self(13); - /// Chocolate v6 - pub const V14: Self = Self(14); - /// OhSnap v7 - pub const V15: Self = Self(15); - /// Skyr (builtin-actors v8) - pub const V16: Self = Self(16); - /// Shark (builtin-actors v9) - pub const V17: Self = Self(17); - /// Hygge (builtin-actors v10) - pub const V18: Self = Self(18); - /// Lightning (builtin-actors v11) - pub const V19: Self = Self(19); - /// Thunder (builtin-actors v11) - pub const V20: Self = Self(20); - /// Watermelon (builtin-actors v12) - pub const V21: Self = Self(21); - - pub const MAX: Self = Self(u32::MAX); - - /// Construct a new arbitrary network version. 
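Editorial aside (not part of the patch): the `NetworkVersion` table above is a thin ordered `u32` new-type, so minimum-version gating is plain comparison. A minimal hedged sketch; `at_least` and `demo` are illustrative names.

```rust
// Hedged sketch, assuming the same NetworkVersion wrapper in the 4.5.1 vendor.
use fvm_shared::version::NetworkVersion;

fn at_least(current: NetworkVersion, min: NetworkVersion) -> bool {
    current >= min
}

fn demo() {
    let nv = NetworkVersion::new(21); // same value as NetworkVersion::V21
    assert_eq!(nv, NetworkVersion::V21);
    assert!(at_least(nv, NetworkVersion::V16));
}
```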
- pub const fn new(v: u32) -> Self { - Self(v) - } -} - -impl Display for NetworkVersion { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} - -impl From for NetworkVersion { - fn from(v: u32) -> Self { - Self(v) - } -} - -impl From for u32 { - fn from(v: NetworkVersion) -> Self { - v.0 - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..df8adce85c54 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "9172edcb01ede1d7d50744f1182ffca7275f710c" + }, + "path_in_vcs": "shared" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/CHANGELOG.md new file mode 100644 index 000000000000..8ce9f2f53397 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/CHANGELOG.md @@ -0,0 +1,295 @@ +# Changelog + +## [Unreleased] + +## 4.5.1 [2024-11-08] + +Remove unnecessary features from `multihash-codetable`. + +## 4.5.0 [2024-10-31] + +- Update `cid` to v0.11 and `multihash` to v0.19. +- Update to `fvm_ipld_blockstore` 0.3.0 and `fvm_ipld_encoding` 0.5.0. + +You will have to update your multihash and cid crates to be compatible, see the [multihash release notes](https://github.com/multiformats/rust-multihash/blob/master/CHANGELOG.md#-2023-06-06) for details on the breaking changes. + +## 4.4.3 [2024-10-21] + +- Update wasmtime to 25.0.2. +- Fixes long wasm compile times with wasmtime 24. + +## 4.4.2 [2024-10-09] + +- Update wasmtime to 24.0.1. + +## 4.4.1 [2024-10-04] + +- chore: remove the `nv24-dev` feature flag [#2051](https://github.com/filecoin-project/ref-fvm/pull/2051) + +## 4.4.0 [2024-09-12] + +- Update to wasmtime 24. +- Switch from mach ports to unix signal handlers on macos. +- Update misc dependencies. 
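Editorial aside (not part of the patch): the 4.5.0 changelog entry above is the migration this patch tracks, with `cid` 0.11 and `multihash-codetable` replacing the old `multihash` facade. A hedged sketch of CID construction against the pinned crates; `dag_cbor_cid` is an illustrative helper name.

```rust
// Hedged sketch, assuming the versions pinned in this patch:
// cid 0.11, multihash-codetable 0.1, fvm_ipld_encoding 0.5.
// Code::Blake2b256 requires multihash-codetable's "blake2b" feature.
use cid::Cid;
use fvm_ipld_encoding::DAG_CBOR;
use multihash_codetable::{Code, MultihashDigest};

fn dag_cbor_cid(encoded: &[u8]) -> Cid {
    // Hash the encoded block and wrap it in a CIDv1 with the dag-cbor codec.
    Cid::new_v1(DAG_CBOR, Code::Blake2b256.digest(encoded))
}
```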
+ +## 4.3.2 [2024-08-16] + +- feat: add `nv24-dev` feature flag [#2029](https://github.com/filecoin-project/ref-fvm/pull/2029) + +## 4.3.1 [2024-06-26] + +- chore: remove the `nv23-dev` feature flag [#2022](https://github.com/filecoin-project/ref-fvm/pull/2022) + +## 4.3.0 [2024-06-12] + +- feat: FIP-0079: syscall for aggregated bls verification [#2003](https://github.com/filecoin-project/ref-fvm/pull/2003) +- fix: install rust nightly toolchain for clusterfuzzlite [#2007](https://github.com/filecoin-project/ref-fvm/pull/2007) +- chore: upgrade rust toolchain to 1.78.0 [#2006](https://github.com/filecoin-project/ref-fvm/pull/2006) +- fix: remove the pairing feature from fvm_shared [#2009](https://github.com/filecoin-project/ref-fvm/pull/2009) +- Small tidy-ups in CONTRIBUTING.md [#2012](https://github.com/filecoin-project/ref-fvm/pull/2012) +- NI-PoRep support [#2010](https://github.com/filecoin-project/ref-fvm/pull/2010) + +## 4.2.0 [2024-04-29] + +- chore: update to wasmtime 19.0.1 [#1993](https://github.com/filecoin-project/ref-fvm/pull/1993) +- Enable nv23 support behind the `nv23-dev` feature flag [#2000](https://github.com/filecoin-project/ref-fvm/pull/2000) +- feat: fvm: remove once_cell [#1989](https://github.com/filecoin-project/ref-fvm/pull/1989) +- feat: shared: check bls zero address without lazy_static [#1984](https://github.com/filecoin-project/ref-fvm/pull/1984) + +## 4.1.2 [2024-01-31] + +feat: allow CBOR events + +## 4.1.1 [2024-01-25] + +Enable nv22 support by default. + +## 4.1.0 [2024-01-24] + +- Pretty-print addresses when debug-formatting, instead of printing the raw bytes as a vector. +- Move the `ActorState` struct to this crate (from the `fvm` crate). +- Add an `upgrade` module to this crate to support the new (disabled by default) actor-upgrade syscall. + +## 4.0.0 [2023-10-31] + +Final release, no changes. + +## 4.0.0-alpha.4 [2023-09-28] + +- Add back some proof types that were mistakenly removed, and fix some of the constants. + +## 4.0.0-alpha.3 [2023-09-27] + +- Remove support for v1 proofs. + +## 4.0.0-alpha.2 [2023-09-21] + +- Implement FIP-0071, FIP-0072, FIP-0073, FIP-0075 + +## 4.0.0-alpha.1 [2023-09-20] + +Unreleased. This release simply marks the change-over to v4. + +## 3.6.0 [2023-09-06] + +- BREAKING: Upgrade the proofs API to v16. +- BREAKING (linking): upgrade blstrs to v0.7 and +- BREAKING: update the minimum rust version to 1.70.0 +- Update & trim some dependencies. +- Add support for the new proofs in v16. + +## 3.5.0 [2023-08-18] + +- Add the V21 network version constant + +## 3.4.0 [2023-06-27] + +Breaking Changes: + +- Update cid/multihash. This is a breaking change as it affects the API. + +## 3.3.1 [2023-05-04] + +Fix some address constants (lazy statics, to be precise) when the current network is set to "testnet". Previously, if said constants were evaluated _after_ switching to testnet mode (calling `address::set_current_network`), they'd fail to parse and crash the program when dereferenced. + +## 3.3.0 [2023-04-23] + +- Fixes an issue with proof bindings. + +## 3.2.0 [2023-04-04] + +- Remove unused dependencies. +- Remove unused dependencies. +- BREAKING: Drop unused `registered_seal_proof` method. This appears to have been unused by anyone. + +## 3.1.0 [2023-03-09] + +Update proofs. Unfortunately, this is a breaking change in a minor release but we need to do the same on the v2 release as well. The correct solution is to introduce two crates, fvm1 and fvm2, but that's a future project. 
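Editorial aside (not part of the patch): the FIP-0079 item in the 4.3.0 entry above corresponds to the reworked `verify_bls_aggregate` visible in the crypto/signature.rs hunk further down. A hedged usage sketch; `all_signed` is an illustrative helper name, and the `ops` module is feature-gated, so this assumes the crate is built with its BLS support enabled.

```rust
// Hedged sketch of calling the reworked aggregate verifier shown later in this patch.
use fvm_shared::crypto::signature::ops::verify_bls_aggregate;
use fvm_shared::crypto::signature::{BLS_PUB_LEN, BLS_SIG_LEN};

fn all_signed(
    agg_sig: &[u8; BLS_SIG_LEN],
    keys: &[[u8; BLS_PUB_LEN]],
    msgs: &[&[u8]],
) -> bool {
    // The new API takes fixed-size byte arrays instead of a Signature value;
    // Err means malformed arguments, Ok(false) means verification failed.
    verify_bls_aggregate(agg_sig, keys, msgs).unwrap_or(false)
}
```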
+ +## 3.0.0 [2022-02-24] + +- Final release for NV18. + +## 3.0.0-alpha.20 [2022-02-06] + +- Change the `BLOCK_GAS_LIMIT` constant to a `u64` to match all the other gas values. + +## 3.0.0-alpha.19 [2022-02-06] + +- Change the event datastructure to take a codec and not double-encode the value. +- Make the message version and gas limits `u64`s instead of `i64`s. + +## 3.0.0-alpha.18 [2022-02-01] + +- Improve rustdocs around events and gas premium. + +## 3.0.0-alpha.17 [2022-01-17] + +- Add `hyperspace` feature to loosen up network version restrictions. + +## 3.0.0-alpha.16 [2023-01-12] + +- Remove uses of the Cbor trait +- Refactor: Move Response from SDK to shared + +## 3.0.0-alpha.15 [2022-12-14] + +- Refactor: ChainID was moved from FVM to shared +- Implement Ethereum Account abstraction + - Removes the f4-as-accont feature, and support for Delegated signature validations + +## 3.0.0-alpha.14 [2022-12-07] + +- Remove GasLimit from the message context. +- Add the message nonce to the message context +- Add the chain ID to the network context. + +## 3.0.0-alpha.13 [2022-11-29] + +- Remove deprecated SYS_INVALID_METHOD exit code +- Add a read-only mode to Sends + - Adds ContextFlags to MessageContext, and a special ReadOnly error + +## 3.0.0-alpha.12 [2022-11-17] + +- Refactor network/message contexts to reduce the number of syscalls. + +## 3.0.0-alpha.11 [2022-11-15] + +- Add support for actor events (FIP-0049). + +## 3.0.0-alpha.10 [2022-11-14] + +- Split `InvokeContext` into two (#1070) +- fix: correctly format negative token amounts (#1065) + +## 3.0.0-alpha.9 [2022-11-08] + +- Add support for state-tree v5. + +## 3.0.0-alpha.8 [2022-10-22] + +- fix compile issues with f4-as-account feature. + +## 3.0.0-alpha.7 [2022-10-21] + +- Temporary workaround: allow validating signatures from embryo f4 addresses + +## 3.0.0-alpha.6 [2022-10-20] + +- Make the f4 address conform to FIP0048 (use `f` as the separator). +- Implement `TryFrom` for `DelegatedAddress` (and make `DelegatedAddress` public). + +## 3.0.0-alpha.5 [2022-10-10] + +- Bumps `fvm_ipld_encoding` and switches from `cs_serde_bytes` to `fvm_ipld_encoding::strict_bytes`. + +## 3.0.0-alpha.4 [2022-10-10] + +- Small f4 address fixes. + +## 3.0.0-alpha.3 [2022-10-10] + +- Switch to rust 2021 edition. +- Add network version 18. +- BREAKING: Allow changing the address "network" at runtime. +- BREAKING: Update the f4 address format and include a checksum. +- BREAKING: Add the gas premium and gas limit to the `vm::context` return type. + +## 3.0.0-alpha.2 [2022-09-16] + +- Add basic f4 address support (without checksums for now). +- Change TokenAmount::from_whole to take any `Into` parameter. +- Add nv17 to the network versions. + +The only breaking change is the change to `Address`/`Protocol` (in case anyone is exhaustively matching on them). + +## 3.0.0-alpha.1 [2022-08-31] + +- Bump base version to v3. +- Add `origin` to `vm::Context`. + +## 2.0.0... + +See the `release/v2` branch. + +- Add recover secp public key syscall. +- Removed `actor::builtin::Type` (moved to the actors themselves). +- Add additional hash functions to the hash syscall. +- Add blake2b512 +- Change TokenAmount from a type alias to a struct wrapping BigInt + +## 0.8.0 [2022-06-13] + +- Add a new proofs version type. + +## 0.7.1 [2022-05-26] + +Add a shared `MAX_CID_LEN` constant. + +## 0.7.0 [2022-05-16] + +- Updates the blockstore. +- Removes unnecessary chrono dep. +- Removes the `DomainSeparationTag` type. 
This is moving into the actors themselves as the FVM + doesn't care about it. + - Downstream crates should just replicate this type internally, if necessary. +- Adds a new `crypto::signature::verify` function to allow verifying signatures without creating a + new `Signature` object. This allows verifying _borrowed_ signatures without allocating. +- Updates for the syscall refactor (see `fvm_sdk` v0.7.0): + - Adds a `BufferTooSmall` `ErrorNumber`. + - Marks `ErrorNumber` as non-exhaustive for future extension. + - Changes the syscall "out" types for the syscall refactor. + +## 0.6.1 [2022-04-29] + +- Added `testing` feature to have `Default` derive on `Message`. Extended this feature to `Address` and `Payload`. +- Improve `ErrorNumber` documentation. +- Update `fvm_ipld_encoding` for the cbor encoder switch. + +## 0.6.0 [2022-04-14] + +BREAKING: Switch syscall struct alignment: https://github.com/filecoin-project/fvm-specs/issues/63 + +Actors built against this new version of fvm_shared will be incompatible with prior FVM versions, +and vice-versa. + +- Added `Display` trait to `Type` for error printing. +- Added _cfg = "testing"_ on `Default` trait for `Message` structure. + +## 0.5.1 [2022-04-11] + +Add the `USR_ASSERTION_FAILED` exit code. + +## 0.5.0 [2022-04-11] + +- Enforce maximum big-int size to match lotus. +- Make signature properties public. +- Major error type refactor. + +The largest change here is a major error type refactor. + +1. It's now a u32 with a set of pre-defined values instead of an enum. +2. The error codes have been reworked according to the FVM spec. + +Both of these changes were made to better support user-defined actors. diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/Cargo.toml new file mode 100644 index 000000000000..d964c298c067 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/Cargo.toml @@ -0,0 +1,149 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +name = "fvm_shared" +version = "4.5.1" +authors = [ + "ChainSafe Systems ", + "Protocol Labs", + "Filecoin Core Devs", +] +description = "Filecoin Virtual Machine shared types and functions" +license = "MIT OR Apache-2.0" +repository = "https://github.com/filecoin-project/ref-fvm" + +[dependencies.anyhow] +version = "1.0.71" + +[dependencies.arbitrary] +version = "1.3.0" +features = ["derive"] +optional = true + +[dependencies.bitflags] +version = "2.3.3" +features = ["serde"] + +[dependencies.blake2b_simd] +version = "1.0.1" + +[dependencies.bls-signatures] +version = "0.15" +optional = true +default-features = false + +[dependencies.cid] +version = "0.11.1" +features = [ + "serde-codec", + "std", +] +default-features = false + +[dependencies.data-encoding] +version = "2.4.0" + +[dependencies.data-encoding-macro] +version = "0.1.13" + +[dependencies.filecoin-proofs-api] +version = "18" +optional = true +default-features = false + +[dependencies.fvm_ipld_encoding] +version = "0.5.1" + +[dependencies.lazy_static] +version = "1.4.0" + +[dependencies.libsecp256k1] +version = "0.7.1" +optional = true + +[dependencies.num-bigint] +version = "0.4" + +[dependencies.num-derive] +version = "0.4.0" + +[dependencies.num-integer] +version = "0.1" + +[dependencies.num-traits] +version = "0.2.14" +default-features = false + +[dependencies.quickcheck] +version = "1.0.0" +optional = true + +[dependencies.serde] +version = "1.0.164" +features = ["derive"] +default-features = false + +[dependencies.serde_tuple] +version = "0.5.0" + +[dependencies.thiserror] +version = "1.0.40" + +[dependencies.unsigned-varint] +version = "0.8.0" + +[dev-dependencies.coverage-helper] +version = "0.2.0" + +[dev-dependencies.multihash-codetable] +version = "0.1.4" +features = [ + "sha2", + "sha3", + "ripemd", +] +default-features = false + +[dev-dependencies.quickcheck_macros] +version = "1.0.0" + +[dev-dependencies.rand] +version = "0.8.5" + +[dev-dependencies.rand_chacha] +version = "0.3.0" + +[dev-dependencies.rusty-fork] +version = "0.3.0" +default-features = false + +[dev-dependencies.serde_json] +version = "1.0.99" + +[features] +arb = [ + "arbitrary", + "dep:quickcheck", + "num-bigint/quickcheck", + "cid/arb", +] +blst = ["bls-signatures/blst"] +crypto = [ + "libsecp256k1", + "blst", + "proofs", +] +default = [] +proofs = ["filecoin-proofs-api"] +secp256k1 = ["libsecp256k1"] +testing = [] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/Cargo.toml.orig new file mode 100644 index 000000000000..6db41611f6d7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/Cargo.toml.orig @@ -0,0 +1,55 @@ +[package] +name = "fvm_shared" +description = "Filecoin Virtual Machine shared types and functions" +version.workspace = true +license.workspace = true +edition.workspace = true +repository.workspace = true +authors = ["ChainSafe Systems ", "Protocol Labs", "Filecoin Core Devs"] + +[dependencies] +blake2b_simd = { workspace = true } +thiserror = { workspace = true } +num-traits = { workspace = true } +num-derive = { workspace = true } +lazy_static = { workspace = true } +cid = { workspace = true, features = ["serde-codec", "std"] } +unsigned-varint = { workspace = true } +anyhow = { workspace = true } +fvm_ipld_encoding = { workspace = true } +serde = { workspace = true, default-features = false } +serde_tuple = { workspace = true } +arbitrary = { workspace = true, optional = 
true, features = ["derive"] } +quickcheck = { workspace = true, optional = true } + +num-bigint = "0.4" +num-integer = "0.1" +data-encoding = "2.4.0" +data-encoding-macro = "0.1.13" +bitflags = { version = "2.3.3", features = ["serde"] } + +## non-wasm dependencies; these dependencies and the respective code is +## only activated through non-default features, which the Kernel enables, but +## not the actors. +filecoin-proofs-api = { version = "18", default-features = false, optional = true } +libsecp256k1 = { workspace = true, optional = true } +bls-signatures = { workspace = true, default-features = false, optional = true } + +[dev-dependencies] +rand = { workspace = true } +serde_json = { workspace = true } +multihash-codetable = { workspace = true, features = ["sha2", "sha3", "ripemd"] } +quickcheck_macros = { workspace = true } +coverage-helper = { workspace = true } +fvm_shared = { path = ".", features = ["arb"] } +rand_chacha = { workspace = true } +rusty-fork = { version = "0.3.0", default-features = false } + +[features] +default = [] +crypto = ["libsecp256k1", "blst", "proofs"] +proofs = ["filecoin-proofs-api"] +secp256k1 = ["libsecp256k1"] +blst = ["bls-signatures/blst"] +testing = [] +arb = ["arbitrary", "dep:quickcheck", "num-bigint/quickcheck", "cid/arb"] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/errors.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/errors.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/errors.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/errors.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/mod.rs new file mode 100644 index 000000000000..de9d090fec94 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/mod.rs @@ -0,0 +1,458 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +mod errors; +mod network; +mod payload; +mod protocol; + +use std::borrow::Cow; +use std::fmt; +use std::hash::Hash; +use std::str::FromStr; + +use data_encoding::Encoding; +use data_encoding_macro::new_encoding; +use fvm_ipld_encoding::strict_bytes; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; + +pub use self::errors::Error; +pub use self::network::{current_network, set_current_network, Network}; +pub use self::payload::{DelegatedAddress, Payload}; +pub use self::protocol::Protocol; +use crate::ActorID; + +/// defines the encoder for base32 encoding with the provided string with no padding +const ADDRESS_ENCODER: Encoding = new_encoding! { + symbols: "abcdefghijklmnopqrstuvwxyz234567", + padding: None, +}; + +/// Hash length of payload for Secp and Actor addresses. +pub const PAYLOAD_HASH_LEN: usize = 20; + +/// Uncompressed secp public key used for validation of Secp addresses. +pub const SECP_PUB_LEN: usize = 65; + +/// BLS public key length used for validation of BLS addresses. +pub const BLS_PUB_LEN: usize = 48; + +/// Max length of f4 sub addresses. +pub const MAX_SUBADDRESS_LEN: usize = 54; + +/// Defines first available ID address after builtin actors +pub const FIRST_NON_SINGLETON_ADDR: ActorID = 100; + +/// The payload bytes of a "zero" BLS key. 
+const BLS_ZERO_ADDRESS_BYTES: [u8; BLS_PUB_LEN] = { + let mut buf = [0u8; BLS_PUB_LEN]; + buf[0] = 192; + buf +}; + +/// Length of the checksum hash for string encodings. +pub const CHECKSUM_HASH_LEN: usize = 4; + +/// The max encoded length of an address. +pub const MAX_ADDRESS_LEN: usize = 65; + +const MAX_ADDRRESS_TEXT_LEN: usize = 138; +const MAINNET_PREFIX: &str = "f"; +const TESTNET_PREFIX: &str = "t"; + +/// Address is the struct that defines the protocol and data payload conversion from either +/// a public key or value +#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "testing", derive(Default))] +#[cfg_attr(feature = "arb", derive(arbitrary::Arbitrary))] +pub struct Address { + payload: Payload, +} + +impl Address { + /// Construct a new address with the specified network. + fn new(protocol: Protocol, bz: &[u8]) -> Result { + Ok(Self { + payload: Payload::new(protocol, bz)?, + }) + } + + /// Creates address from encoded bytes. + pub fn from_bytes(bz: &[u8]) -> Result { + if bz.len() < 2 { + Err(Error::InvalidLength) + } else { + let protocol = Protocol::from_byte(bz[0]).ok_or(Error::UnknownProtocol)?; + Self::new(protocol, &bz[1..]) + } + } + + /// Generates new address using ID protocol. + pub const fn new_id(id: u64) -> Self { + Self { + payload: Payload::ID(id), + } + } + + /// Generates new address using Secp256k1 pubkey. + pub fn new_secp256k1(pubkey: &[u8]) -> Result { + if pubkey.len() != SECP_PUB_LEN { + return Err(Error::InvalidSECPLength(pubkey.len())); + } + Ok(Self { + payload: Payload::Secp256k1(address_hash(pubkey)), + }) + } + + /// Generates new address using the Actor protocol. + pub fn new_actor(data: &[u8]) -> Self { + Self { + payload: Payload::Actor(address_hash(data)), + } + } + + /// Generates a new delegated address from a namespace and a subaddress. + pub fn new_delegated(ns: ActorID, subaddress: &[u8]) -> Result { + Ok(Self { + payload: Payload::Delegated(DelegatedAddress::new(ns, subaddress)?), + }) + } + + /// Generates new address using BLS pubkey. + pub fn new_bls(pubkey: &[u8]) -> Result { + if pubkey.len() != BLS_PUB_LEN { + return Err(Error::InvalidBLSLength(pubkey.len())); + } + let mut key = [0u8; BLS_PUB_LEN]; + key.copy_from_slice(pubkey); + Ok(Self { + payload: Payload::BLS(key), + }) + } + + pub fn is_bls_zero_address(&self) -> bool { + match self.payload { + Payload::BLS(payload_bytes) => payload_bytes == BLS_ZERO_ADDRESS_BYTES, + _ => false, + } + } + + /// Returns protocol for Address + pub fn protocol(&self) -> Protocol { + Protocol::from(self.payload) + } + + /// Returns the `Payload` object from the address, where the respective protocol data is kept + /// in an enum separated by protocol + pub fn payload(&self) -> &Payload { + &self.payload + } + + /// Converts Address into `Payload` object, where the respective protocol data is kept + /// in an enum separated by protocol + pub fn into_payload(self) -> Payload { + self.payload + } + + /// Returns the raw bytes data payload of the Address + pub fn payload_bytes(&self) -> Vec { + self.payload.to_raw_bytes() + } + + /// Returns encoded bytes of Address + pub fn to_bytes(self) -> Vec { + self.payload.to_bytes() + } + + /// Get ID of the address. ID protocol only. 
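Editorial aside (not part of the patch): the constructors above are what callers of the vendored crate touch most; note the textual "f"/"t" prefix comes from the process-wide network setting, not from the address bytes. A hedged sketch; `demo` and the sub-address literal are illustrative.

```rust
// Hedged sketch against the Address API added above (fvm_shared 4.5.1 vendor).
use fvm_shared::address::Address;

fn demo() -> Result<(), fvm_shared::address::Error> {
    let id = Address::new_id(1);                          // prints as f01 on mainnet
    let f4 = Address::new_delegated(10, b"example-sub")?; // namespace 10, hypothetical sub-address
    assert_eq!(id.to_bytes(), vec![0, 1]);                // protocol byte 0 + LEB128-encoded id
    println!("{id} {f4}");
    Ok(())
}
```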
+ pub fn id(&self) -> Result { + match self.payload { + Payload::ID(id) => Ok(id), + _ => Err(Error::NonIDAddress), + } + } +} + +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let protocol = self.protocol(); + + // write `fP` where P is the protocol number. + write!(f, "{}{}", current_network().to_prefix(), protocol)?; + + fn write_payload( + f: &mut fmt::Formatter<'_>, + protocol: Protocol, + prefix: Option<&[u8]>, + data: &[u8], + ) -> fmt::Result { + let mut hasher = blake2b_simd::Params::new() + .hash_length(CHECKSUM_HASH_LEN) + .to_state(); + hasher.update(&[protocol as u8]); + if let Some(prefix) = prefix { + hasher.update(prefix); + } + hasher.update(data); + + let mut buf = Vec::with_capacity(data.len() + CHECKSUM_HASH_LEN); + buf.extend(data); + buf.extend(hasher.finalize().as_bytes()); + + f.write_str(&ADDRESS_ENCODER.encode(&buf)) + } + + match self.payload() { + Payload::ID(id) => write!(f, "{}", id), + Payload::Secp256k1(data) | Payload::Actor(data) => { + write_payload(f, protocol, None, data) + } + Payload::BLS(data) => write_payload(f, protocol, None, data), + Payload::Delegated(addr) => { + write!(f, "{}f", addr.namespace())?; + write_payload( + f, + protocol, + Some(unsigned_varint::encode::u64( + addr.namespace(), + &mut unsigned_varint::encode::u64_buffer(), + )), + addr.subaddress(), + ) + } + } + } +} + +// Manually implement Debug so we print a "real" address. +impl fmt::Debug for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("Address") + .field(&format_args!("\"{}\"", self)) + .finish() + } +} + +#[cfg(feature = "arb")] +impl quickcheck::Arbitrary for Address { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + Self { + payload: Payload::arbitrary(g), + } + } +} + +fn parse_address(addr: &str) -> Result<(Address, Network), Error> { + if addr.len() > MAX_ADDRRESS_TEXT_LEN || addr.len() < 3 { + return Err(Error::InvalidLength); + } + let network = Network::from_prefix(addr.get(0..1).ok_or(Error::UnknownNetwork)?)?; + + // get protocol from second character + let protocol: Protocol = match addr.get(1..2).ok_or(Error::UnknownProtocol)? 
{ + "0" => Protocol::ID, + "1" => Protocol::Secp256k1, + "2" => Protocol::Actor, + "3" => Protocol::BLS, + "4" => Protocol::Delegated, + _ => { + return Err(Error::UnknownProtocol); + } + }; + + fn validate_and_split_checksum<'a>( + protocol: Protocol, + prefix: Option<&[u8]>, + payload: &'a [u8], + ) -> Result<&'a [u8], Error> { + if payload.len() < CHECKSUM_HASH_LEN { + return Err(Error::InvalidLength); + } + let (payload, csum) = payload.split_at(payload.len() - CHECKSUM_HASH_LEN); + let mut hasher = blake2b_simd::Params::new() + .hash_length(CHECKSUM_HASH_LEN) + .to_state(); + hasher.update(&[protocol as u8]); + if let Some(prefix) = prefix { + hasher.update(prefix); + } + hasher.update(payload); + if hasher.finalize().as_bytes() != csum { + return Err(Error::InvalidChecksum); + } + Ok(payload) + } + + // bytes after the protocol character is the data payload of the address + let raw = addr.get(2..).ok_or(Error::InvalidPayload)?; + let addr = match protocol { + Protocol::ID => { + if raw.len() > 20 { + // 20 is max u64 as string + return Err(Error::InvalidLength); + } + let id = raw.parse::()?; + Address { + payload: Payload::ID(id), + } + } + Protocol::Delegated => { + let (id, subaddr) = raw.split_once('f').ok_or(Error::InvalidPayload)?; + if id.len() > 20 { + // 20 is max u64 as string + return Err(Error::InvalidLength); + } + let id = id.parse::()?; + // decode subaddr + let subaddr_csum = ADDRESS_ENCODER.decode(subaddr.as_bytes())?; + // validate and split subaddr. + let subaddr = validate_and_split_checksum( + protocol, + Some(unsigned_varint::encode::u64( + id, + &mut unsigned_varint::encode::u64_buffer(), + )), + &subaddr_csum, + )?; + + Address { + payload: Payload::Delegated(DelegatedAddress::new(id, subaddr)?), + } + } + Protocol::Secp256k1 | Protocol::Actor | Protocol::BLS => { + // decode using byte32 encoding + let payload_csum = ADDRESS_ENCODER.decode(raw.as_bytes())?; + // validate and split payload. + let payload = validate_and_split_checksum(protocol, None, &payload_csum)?; + + // sanity check to make sure address hash values are correct length + if match protocol { + Protocol::Secp256k1 | Protocol::Actor => PAYLOAD_HASH_LEN, + Protocol::BLS => BLS_PUB_LEN, + _ => unreachable!(), + } != payload.len() + { + return Err(Error::InvalidPayload); + } + + Address::new(protocol, payload)? 
+ } + }; + Ok((addr, network)) +} + +impl FromStr for Address { + type Err = Error; + fn from_str(addr: &str) -> Result { + current_network().parse_address(addr) + } +} + +impl Serialize for Address { + fn serialize(&self, s: S) -> Result + where + S: Serializer, + { + let address_bytes = self.to_bytes(); + strict_bytes::Serialize::serialize(&address_bytes, s) + } +} + +impl<'de> Deserialize<'de> for Address { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bz: Cow<'de, [u8]> = strict_bytes::Deserialize::deserialize(deserializer)?; + + // Create and return created address of unmarshalled bytes + Address::from_bytes(&bz).map_err(de::Error::custom) + } +} + +pub(crate) fn to_leb_bytes(id: u64) -> Vec { + // write id to buffer in leb128 format + unsigned_varint::encode::u64(id, &mut unsigned_varint::encode::u64_buffer()).into() +} + +pub(crate) fn from_leb_bytes(bz: &[u8]) -> Result { + // write id to buffer in leb128 format + let (id, remaining) = unsigned_varint::decode::u64(bz)?; + if !remaining.is_empty() { + return Err(Error::InvalidPayload); + } + Ok(id) +} + +/// Returns an address hash for given data +fn address_hash(ingest: &[u8]) -> [u8; 20] { + let digest = blake2b_simd::Params::new() + .hash_length(PAYLOAD_HASH_LEN) + .to_state() + .update(ingest) + .finalize(); + + let mut hash = [0u8; 20]; + hash.copy_from_slice(digest.as_bytes()); + hash +} + +#[cfg(test)] +mod tests { + // Test cases for FOR-02: https://github.com/ChainSafe/forest/issues/1134 + use crate::address::errors::Error; + use crate::address::{from_leb_bytes, to_leb_bytes}; + + #[test] + fn test_debug() { + // the address string is dependent on current network state which is set + // globally so we need to check against possible valid options + let addr_debug_str = format!("{:?}", super::Address::new_id(1)); + assert!(["Address(\"f01\")", "Address(\"t01\")"] + .iter() + .any(|&s| s == addr_debug_str)); + } + + #[test] + fn test_from_leb_bytes_passing() { + let passing = vec![67]; + assert_eq!(to_leb_bytes(from_leb_bytes(&passing).unwrap()), vec![67]); + } + + #[test] + fn test_from_leb_bytes_extra_bytes() { + let extra_bytes = vec![67, 0, 1, 2]; + + match from_leb_bytes(&extra_bytes) { + Ok(id) => { + println!( + "Successfully decoded bytes when it was not supposed to. Result was: {:?}", + &to_leb_bytes(id) + ); + panic!(); + } + Err(e) => { + assert_eq!(e, Error::InvalidPayload); + } + } + } + + #[test] + fn test_from_leb_bytes_minimal_encoding() { + let minimal_encoding = vec![67, 0, 130, 0]; + + match from_leb_bytes(&minimal_encoding) { + Ok(id) => { + println!( + "Successfully decoded bytes when it was not supposed to. 
Result was: {:?}", + &to_leb_bytes(id) + ); + panic!(); + } + Err(e) => { + assert_eq!(e, Error::InvalidPayload); + } + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/network.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/network.rs new file mode 100644 index 000000000000..5724947bcfa3 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/network.rs @@ -0,0 +1,106 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::sync::atomic::{AtomicU8, Ordering}; + +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::{FromPrimitive, ToPrimitive}; + +use super::{Address, Error, MAINNET_PREFIX, TESTNET_PREFIX}; + +static ATOMIC_NETWORK: AtomicU8 = AtomicU8::new(0); + +/// Network defines the preconfigured networks to use with address encoding +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, FromPrimitive, ToPrimitive)] +#[repr(u8)] +#[cfg_attr(feature = "arb", derive(arbitrary::Arbitrary))] +#[derive(Default)] +pub enum Network { + #[default] + Mainnet = 0, + Testnet = 1, +} + +impl Network { + /// to_prefix is used to convert the network into a string + /// used when converting address to string + pub(super) fn to_prefix(self) -> &'static str { + match self { + Network::Mainnet => MAINNET_PREFIX, + Network::Testnet => TESTNET_PREFIX, + } + } + + /// from_prefix is used to convert the network from a string + /// used when parsing + pub(super) fn from_prefix(s: &str) -> Result { + match s { + MAINNET_PREFIX => Ok(Network::Mainnet), + TESTNET_PREFIX => Ok(Network::Testnet), + _ => Err(Error::UnknownNetwork), + } + } + + /// Parse an address belonging to this network. + pub fn parse_address(self, addr: &str) -> Result { + let (addr, network) = super::parse_address(addr)?; + if network != self { + return Err(Error::UnknownNetwork); + } + Ok(addr) + } +} + +/// Gets the current network. +pub fn current_network() -> Network { + Network::from_u8(ATOMIC_NETWORK.load(Ordering::Relaxed)).unwrap_or_default() +} + +/// Sets the default network. +/// +/// The network is used to differentiate between different filecoin networks _in text_ but isn't +/// actually encoded in the binary representation of addresses. Changing the current network will: +/// +/// 1. Change the prefix used when formatting an address as a string. +/// 2. Change the prefix _accepted_ when parsing an address. +pub fn set_current_network(network: Network) { + ATOMIC_NETWORK.store(network.to_u8().unwrap_or_default(), Ordering::Relaxed) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::*; + use crate::address::Address; + + // We fork this test into a new process because it messes with global state. + use rusty_fork::rusty_fork_test; + rusty_fork_test! { + #[test] + fn set_network() { + assert_eq!(current_network(), Network::default()); + assert_eq!(Network::default(), Network::Mainnet); + + // We're in mainnet mode. + let addr1 = Address::from_str("f01"); + Address::from_str("t01").expect_err("should have failed to parse testnet address"); + assert_eq!( + addr1, + Network::Testnet.parse_address("t01"), + "parsing an explicit address should still work" + ); + + // Switch to testnet mode. + set_current_network(Network::Testnet); + + // Now we're in testnet mode. 
+ let addr2 = Address::from_str("t01"); + Address::from_str("f01").expect_err("should have failed to parse testnet address"); + + // Networks are relevent for parsing only. + assert_eq!(addr1, addr2) + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/payload.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/payload.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/payload.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/payload.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/protocol.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/protocol.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/address/protocol.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/address/protocol.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/bigint/bigint_ser.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/bigint/bigint_ser.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/bigint/bigint_ser.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/bigint/bigint_ser.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/bigint/biguint_ser.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/bigint/biguint_ser.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/bigint/biguint_ser.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/bigint/biguint_ser.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/bigint/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/bigint/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/bigint/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/bigint/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/chainid/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/chainid/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/chainid/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/chainid/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/clock/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/clock/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/clock/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/clock/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/clock/quantize.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/clock/quantize.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/clock/quantize.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/clock/quantize.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/commcid/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/commcid/mod.rs similarity index 100% rename from 
third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/commcid/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/commcid/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/consensus/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/consensus/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/consensus/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/consensus/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/crypto/hash.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/crypto/hash.rs new file mode 100644 index 000000000000..b7b5a75688c4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/crypto/hash.rs @@ -0,0 +1,17 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[repr(u64)] +pub enum SupportedHashes { + Sha2_256 = 0x12, + Blake2b256 = 0xb220, + Blake2b512 = 0xb240, + Keccak256 = 0x1b, + Ripemd160 = 0x1053, +} + +impl From for u64 { + fn from(value: SupportedHashes) -> Self { + value as Self + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/crypto/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/crypto/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/crypto/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/crypto/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/crypto/signature.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/crypto/signature.rs similarity index 86% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/crypto/signature.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/crypto/signature.rs index 36caf5afd433..9f7fe6374fe8 100644 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/crypto/signature.rs +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/crypto/signature.rs @@ -158,7 +158,6 @@ pub mod ops { use super::{Error, SECP_SIG_LEN, SECP_SIG_MESSAGE_HASH_SIZE}; use crate::address::{Address, Protocol}; - use crate::crypto::signature::Signature; /// Returns `String` error if a bls signature is invalid. pub fn verify_bls_sig(signature: &[u8], data: &[u8], addr: &Address) -> Result<(), String> { @@ -188,6 +187,38 @@ pub mod ops { } } + /// Verifies an aggregated BLS signature. Returns `Ok(false)` if signature verification fails + /// and `String` error if arguments are invalid. + pub fn verify_bls_aggregate( + aggregate_sig: &[u8; super::BLS_SIG_LEN], + pub_keys: &[[u8; super::BLS_PUB_LEN]], + plaintexts: &[&[u8]], + ) -> Result { + // If the number of public keys and data does not match, return false; + let (num_pub_keys, num_plaintexts) = (pub_keys.len(), plaintexts.len()); + if num_pub_keys != num_plaintexts { + return Err(format!( + "unequal numbers of public keys ({num_pub_keys}) and plaintexts ({num_plaintexts})", + )); + } + if num_pub_keys == 0 { + return Ok(true); + } + + // Deserialize signature bytes into a curve point. + let sig = BlsSignature::from_bytes(aggregate_sig) + .map_err(|_| "bls aggregate signature bytes are invalid G2 curve point".to_string())?; + + // Deserialize each public key's bytes into a curve point. 
+ let pub_keys = pub_keys + .iter() + .map(|pub_key| BlsPubKey::from_bytes(pub_key.as_slice())) + .collect::, _>>() + .map_err(|_| "bls public key bytes are invalid G2 curve point".to_string())?; + + Ok(bls_signatures::verify_messages(&sig, plaintexts, &pub_keys)) + } + /// Returns `String` error if a secp256k1 signature is invalid. pub fn verify_secp256k1_sig( signature: &[u8], @@ -229,37 +260,6 @@ pub mod ops { } } - /// Aggregates and verifies bls signatures collectively. - pub fn verify_bls_aggregate( - data: &[&[u8]], - pub_keys: &[&[u8]], - aggregate_sig: &Signature, - ) -> bool { - // If the number of public keys and data does not match, then return false - if data.len() != pub_keys.len() { - return false; - } - if data.is_empty() { - return true; - } - - let sig = match BlsSignature::from_bytes(aggregate_sig.bytes()) { - Ok(v) => v, - Err(_) => return false, - }; - - let pk_map_results: Result, _> = - pub_keys.iter().map(|x| BlsPubKey::from_bytes(x)).collect(); - - let pks = match pk_map_results { - Ok(v) => v, - Err(_) => return false, - }; - - // Does the aggregate verification - verify_messages(&sig, data, &pks[..]) - } - /// Return the public key used for signing a message given it's signing bytes hash and signature. pub fn recover_secp_public_key( hash: &[u8; SECP_SIG_MESSAGE_HASH_SIZE], @@ -322,24 +322,27 @@ mod tests { let private_keys: Vec = (0..num_sigs).map(|_| PrivateKey::generate(rng)).collect(); - let public_keys: Vec<_> = private_keys + let public_keys: Vec<[u8; BLS_PUB_LEN]> = private_keys .iter() - .map(|x| x.public_key().as_bytes()) + .map(|x| { + x.public_key() + .as_bytes() + .try_into() + .expect("public key bytes to array conversion should not fail") + }) .collect(); let signatures: Vec = (0..num_sigs) .map(|x| private_keys[x].sign(data[x])) .collect(); - let public_keys_slice: Vec<&[u8]> = public_keys.iter().map(|x| &**x).collect(); + let agg_sig: [u8; BLS_SIG_LEN] = bls_signatures::aggregate(&signatures) + .expect("bls signature aggregation should not fail") + .as_bytes() + .try_into() + .expect("bls aggregate signature to bytes array should not fail"); - let calculated_bls_agg = - Signature::new_bls(bls_signatures::aggregate(&signatures).unwrap().as_bytes()); - assert!(verify_bls_aggregate( - &data, - &public_keys_slice, - &calculated_bls_agg - ),); + assert!(verify_bls_aggregate(&agg_sig, &public_keys, &data).unwrap()); } #[test] diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/deal/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/deal/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/deal/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/deal/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/econ/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/econ/mod.rs new file mode 100644 index 000000000000..b2b9b0884132 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/econ/mod.rs @@ -0,0 +1,482 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::cmp::Ordering; +use std::fmt; +use std::iter::Sum; +use std::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; + +use num_bigint::BigInt; +use num_integer::Integer; +use num_traits::{Signed, Zero}; +use serde::{Deserialize, Serialize, Serializer}; + +use crate::bigint::bigint_ser; + +/// A quantity of native tokens. 
+/// A token amount is an integer, but has a human interpretation as a value with +/// 18 decimal places. +/// This is a new-type in order to prevent accidental conversion from other BigInts. +/// From/Into BigInt is missing by design. +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct TokenAmount { + atto: BigInt, +} + +// This type doesn't implement all the numeric traits (Num, Signed, etc), +// opting for a minimal useful set. Others can be added if needed. +impl TokenAmount { + /// The logical number of decimal places of a token unit. + pub const DECIMALS: usize = 18; + + /// The logical precision of a token unit. + pub const PRECISION: u64 = 10u64.pow(Self::DECIMALS as u32); + + /// Creates a token amount from a quantity of indivisible units (10^-18 whole units). + pub fn from_atto(atto: impl Into) -> Self { + Self { atto: atto.into() } + } + + /// Creates a token amount from nanoFIL. + pub fn from_nano(nano: impl Into) -> Self { + const NANO_PRECISION: u64 = 10u64.pow((TokenAmount::DECIMALS as u32) - 9); + Self { + atto: nano.into() * NANO_PRECISION, + } + } + + /// Creates a token amount from a quantity of whole units (10^18 indivisible units). + pub fn from_whole(tokens: impl Into) -> Self { + Self::from_atto(tokens.into() * Self::PRECISION) + } + + /// Returns the quantity of indivisible units. + pub fn atto(&self) -> &BigInt { + &self.atto + } + + pub fn is_zero(&self) -> bool { + self.atto.is_zero() + } + + pub fn is_positive(&self) -> bool { + self.atto.is_positive() + } + + pub fn is_negative(&self) -> bool { + self.atto.is_negative() + } +} + +impl Zero for TokenAmount { + #[inline] + fn zero() -> Self { + Self { + atto: BigInt::zero(), + } + } + + #[inline] + fn is_zero(&self) -> bool { + self.atto.is_zero() + } +} + +impl Ord for TokenAmount { + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + self.atto.cmp(&other.atto) + } +} + +impl PartialOrd for TokenAmount { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Default for TokenAmount { + #[inline] + fn default() -> TokenAmount { + TokenAmount::zero() + } +} + +impl fmt::Debug for TokenAmount { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "TokenAmount({})", self) + } +} + +#[cfg(feature = "arb")] +impl quickcheck::Arbitrary for TokenAmount { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + TokenAmount::from_atto(BigInt::arbitrary(g)) + } +} + +/// Displays a token amount as a decimal in human units. +/// To avoid any confusion over whether the value is in human-scale or indivisible units, +/// the display always includes a decimal point. +impl fmt::Display for TokenAmount { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Implementation based on the bigdecimal library. + let (q, r) = self.atto.div_rem(&BigInt::from(Self::PRECISION)); + let before_decimal = q.abs().to_str_radix(10); + let after_decimal = if r.is_zero() { + "0".to_string() + } else { + let fraction_str = r.abs().to_str_radix(10); + let render = "0".repeat(Self::DECIMALS - fraction_str.len()) + fraction_str.as_str(); + render.trim_end_matches('0').to_string() + }; + + // Alter precision after the decimal point + let after_decimal = if let Some(precision) = f.precision() { + let len = after_decimal.len(); + if len < precision { + after_decimal + "0".repeat(precision - len).as_str() + } else { + after_decimal[0..precision].to_string() + } + } else { + after_decimal + }; + + // Always show the decimal point, even with ".0". 
+ let complete_without_sign = before_decimal + "." + after_decimal.as_str(); + // Padding works even though we have a decimal point. + f.pad_integral(!self.atto().is_negative(), "", &complete_without_sign) + } +} + +impl Neg for TokenAmount { + type Output = TokenAmount; + + #[inline] + fn neg(self) -> TokenAmount { + TokenAmount { atto: -self.atto } + } +} + +impl<'a> Neg for &'a TokenAmount { + type Output = TokenAmount; + + #[inline] + fn neg(self) -> TokenAmount { + TokenAmount { + atto: (&self.atto).neg(), + } + } +} + +// Implements Add for all combinations of value/reference receiver and parameter. +// (Pattern copied from BigInt multiplication). +macro_rules! impl_add { + ($(impl<$($a:lifetime),*> Add<$Other:ty> for $Self:ty;)*) => {$( + impl<$($a),*> Add<$Other> for $Self { + type Output = TokenAmount; + + #[inline] + fn add(self, other: $Other) -> TokenAmount { + // automatically match value/ref + let TokenAmount { atto: x, .. } = self; + let TokenAmount { atto: y, .. } = other; + TokenAmount {atto: x + y} + } + } + )*} +} +impl_add! { + impl<> Add for TokenAmount; + impl<'b> Add<&'b TokenAmount> for TokenAmount; + impl<'a> Add for &'a TokenAmount; + impl<'a, 'b> Add<&'b TokenAmount> for &'a TokenAmount; +} + +impl AddAssign for TokenAmount { + #[inline] + fn add_assign(&mut self, other: TokenAmount) { + self.atto += &other.atto; + } +} + +impl<'a> AddAssign<&'a TokenAmount> for TokenAmount { + #[inline] + fn add_assign(&mut self, other: &TokenAmount) { + self.atto += &other.atto; + } +} + +// Implements Sub for all combinations of value/reference receiver and parameter. +macro_rules! impl_sub { + ($(impl<$($a:lifetime),*> Sub<$Other:ty> for $Self:ty;)*) => {$( + impl<$($a),*> Sub<$Other> for $Self { + type Output = TokenAmount; + + #[inline] + fn sub(self, other: $Other) -> TokenAmount { + // automatically match value/ref + let TokenAmount { atto: x, .. } = self; + let TokenAmount { atto: y, .. } = other; + TokenAmount {atto: x - y} + } + } + )*} +} +impl_sub! { + impl<> Sub for TokenAmount; + impl<'b> Sub<&'b TokenAmount> for TokenAmount; + impl<'a> Sub for &'a TokenAmount; + impl<'a, 'b> Sub<&'b TokenAmount> for &'a TokenAmount; +} + +impl SubAssign for TokenAmount { + #[inline] + fn sub_assign(&mut self, other: TokenAmount) { + self.atto -= &other.atto; + } +} + +impl<'a> SubAssign<&'a TokenAmount> for TokenAmount { + #[inline] + fn sub_assign(&mut self, other: &TokenAmount) { + self.atto -= &other.atto; + } +} + +impl Mul for TokenAmount +where + BigInt: Mul, +{ + type Output = TokenAmount; + + fn mul(self, rhs: T) -> Self::Output { + TokenAmount { + atto: self.atto * rhs, + } + } +} + +impl<'a, T> Mul for &'a TokenAmount +where + &'a BigInt: Mul, +{ + type Output = TokenAmount; + + fn mul(self, rhs: T) -> Self::Output { + TokenAmount { + atto: &self.atto * rhs, + } + } +} + +macro_rules! impl_mul { + ($(impl<$($a:lifetime),*> Mul<$Other:ty> for $Self:ty;)*) => {$( + impl<$($a),*> Mul<$Other> for $Self { + type Output = TokenAmount; + + #[inline] + fn mul(self, other: $Other) -> TokenAmount { + other * self + } + } + )*} +} + +macro_rules! impl_muls { + ($($t:ty,)*) => {$( + impl_mul! { + impl<> Mul for $t; + impl<'b> Mul<&'b TokenAmount> for $t; + impl<'a> Mul for &'a $t; + impl<'a, 'b> Mul<&'b TokenAmount> for &'a $t; + } + )*}; +} + +impl_muls! 
{ + u8, u16, u32, u64, u128, + i8, i16, i32, i64, i128, + BigInt, +} + +impl MulAssign for TokenAmount +where + BigInt: MulAssign, +{ + #[inline] + fn mul_assign(&mut self, other: T) { + self.atto *= other; + } +} + +// Only a single div/rem method is implemented, rather than the full Div and Rem traits. +// Division isn't a common operation with money-like units, and deserves to be treated carefully. +impl TokenAmount { + #[inline] + pub fn div_rem(&self, other: impl Into) -> (TokenAmount, TokenAmount) { + let (q, r) = self.atto.div_rem(&other.into()); + (TokenAmount { atto: q }, TokenAmount { atto: r }) + } + + #[inline] + pub fn div_ceil(&self, other: impl Into) -> TokenAmount { + TokenAmount { + atto: self.atto.div_ceil(&other.into()), + } + } + + #[inline] + pub fn div_floor(&self, other: impl Into) -> TokenAmount { + TokenAmount { + atto: self.atto.div_floor(&other.into()), + } + } +} + +impl Sum for TokenAmount { + fn sum>(iter: I) -> Self { + Self::from_atto(iter.map(|t| t.atto).sum::()) + } +} + +impl<'a> Sum<&'a TokenAmount> for TokenAmount { + fn sum>(iter: I) -> Self { + Self::from_atto(iter.map(|t| &t.atto).sum::()) + } +} + +// Serialisation + +impl Serialize for TokenAmount { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + bigint_ser::serialize(&self.atto, serializer) + } +} + +impl<'de> Deserialize<'de> for TokenAmount { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + bigint_ser::deserialize(deserializer).map(|v| TokenAmount { atto: v }) + } +} + +#[cfg(test)] +mod test { + use num_bigint::BigInt; + use num_traits::Zero; + + use crate::TokenAmount; + + fn whole(x: impl Into) -> TokenAmount { + TokenAmount::from_whole(x) + } + + fn atto(x: impl Into) -> TokenAmount { + TokenAmount::from_atto(x.into()) + } + + #[test] + fn display_basic() { + fn basic(expected: &str, t: TokenAmount) { + assert_eq!(expected, format!("{}", t)); + } + + basic("0.0", TokenAmount::zero()); + basic("0.000000000000000001", atto(1)); + basic("0.000000000000001", atto(1000)); + basic("0.1234", atto(123_400_000_000_000_000_u64)); + basic("0.10101", atto(101_010_000_000_000_000_u64)); + basic("1.0", whole(1)); + basic("1.0", atto(1_000_000_000_000_000_000_u128)); + basic("1.1", atto(1_100_000_000_000_000_000_u128)); + basic("1.000000000000000001", atto(1_000_000_000_000_000_001_u128)); + basic( + "1234.000000000123456789", + whole(1234) + atto(123_456_789_u64), + ); + } + + #[test] + fn display_precision() { + assert_eq!("0.0", format!("{:.1}", TokenAmount::zero())); + assert_eq!("0.000", format!("{:.3}", TokenAmount::zero())); + assert_eq!("0.000", format!("{:.3}", atto(1))); // Truncated. + assert_eq!( + "0.123", + format!("{:.3}", atto(123_456_789_000_000_000_u64)) // Truncated. + ); + assert_eq!( + "0.123456789000", + format!("{:.12}", atto(123_456_789_000_000_000_u64)) + ); + } + + #[test] + fn display_padding() { + assert_eq!("0.0", format!("{:01}", TokenAmount::zero())); + assert_eq!("0.0", format!("{:03}", TokenAmount::zero())); + assert_eq!("000.0", format!("{:05}", TokenAmount::zero())); + assert_eq!( + "0.123", + format!("{:01.3}", atto(123_456_789_000_000_000_u64)) + ); + assert_eq!( + "00.123", + format!("{:06.3}", atto(123_456_789_000_000_000_u64)) + ); + } + + #[test] + fn display_negative() { + assert_eq!("-0.000001", format!("{:01}", -TokenAmount::from_nano(1000))); + } + + #[test] + fn ops() { + // Test the basic operations are wired up correctly. 
+ assert_eq!(atto(15), atto(10) + atto(5)); + assert_eq!(atto(3), atto(10) - atto(7)); + assert_eq!(atto(12), atto(3) * 4); + let (q, r) = atto(14).div_rem(4); + assert_eq!((atto(3), atto(2)), (q, r)); + + let mut a = atto(1); + a += atto(2); + assert_eq!(atto(3), a); + a *= 2; + assert_eq!(atto(6), a); + a -= atto(2); + assert_eq!(atto(4), a); + } + + #[test] + fn nano_fil() { + assert_eq!( + TokenAmount::from_nano(1), + TokenAmount::from_whole(1).div_floor(10u64.pow(9)) + ) + } + + #[test] + fn test_mul() { + let a = atto(2) * 3; + let b = 3 * atto(2); + assert_eq!(a, atto(6)); + assert_eq!(a, b); + } + + #[test] + fn test_sum() { + assert_eq!( + [1, 2, 3, 4].into_iter().map(atto).sum::(), + atto(10) + ); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/error/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/error/mod.rs new file mode 100644 index 000000000000..3f4712304001 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/error/mod.rs @@ -0,0 +1,178 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::fmt::Formatter; + +use num_derive::FromPrimitive; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +/// ExitCode defines the exit code from the VM invocation. +#[derive(PartialEq, Eq, Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(transparent)] +#[repr(transparent)] +pub struct ExitCode { + value: u32, +} + +impl ExitCode { + pub const fn new(value: u32) -> Self { + Self { value } + } + + pub fn value(self) -> u32 { + self.value + } + + /// Returns true if the exit code indicates success. + pub fn is_success(self) -> bool { + self.value == 0 + } + + /// Returns true if the error code is in the range of exit codes reserved for the VM + /// (including Ok). + pub fn is_system_error(self) -> bool { + self.value < (Self::FIRST_USER_EXIT_CODE) + } +} + +impl From for ExitCode { + fn from(value: u32) -> Self { + ExitCode { value } + } +} + +impl std::fmt::Display for ExitCode { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.value) + } +} + +impl ExitCode { + // Exit codes which originate inside the VM. + // These values may not be used by actors when aborting. + + /// The code indicating successful execution. + pub const OK: ExitCode = ExitCode::new(0); + /// The message sender doesn't exist. + pub const SYS_SENDER_INVALID: ExitCode = ExitCode::new(1); + /// The message sender was not in a valid state to send this message. + /// + /// Either: + /// - The sender's nonce nonce didn't match the message nonce. + /// - The sender didn't have the funds to cover the message gas. + pub const SYS_SENDER_STATE_INVALID: ExitCode = ExitCode::new(2); + //pub const SYS_RESERVED_3 ExitCode = ExitCode::new(3); + /// The message receiver trapped (panicked). + pub const SYS_ILLEGAL_INSTRUCTION: ExitCode = ExitCode::new(4); + /// The message receiver either doesn't exist and can't be automatically created or it doesn't + /// implement the required entrypoint. + pub const SYS_INVALID_RECEIVER: ExitCode = ExitCode::new(5); + /// The message sender didn't have the requisite funds. + pub const SYS_INSUFFICIENT_FUNDS: ExitCode = ExitCode::new(6); + /// Message execution (including subcalls) used more gas than the specified limit. + pub const SYS_OUT_OF_GAS: ExitCode = ExitCode::new(7); + // pub const SYS_RESERVED_8: ExitCode = ExitCode::new(8); + /// The message receiver aborted with a reserved exit code. 
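As a quick illustration of the TokenAmount type vendored above (not part of the patch), the sketch below exercises its constructors, the always-decimal Display impl, and the deliberately restricted arithmetic; it assumes the crate is addressed as `fvm_shared`, as elsewhere in this change:

use fvm_shared::econ::TokenAmount;

fn main() {
    let one_fil = TokenAmount::from_whole(1);   // 10^18 atto
    let tip = TokenAmount::from_nano(500);      // 5 * 10^11 atto
    let dust = TokenAmount::from_atto(7u64);

    let total = &one_fil + &tip + &dust;
    assert!(total.is_positive());

    // Display always prints a decimal point; a format precision truncates.
    assert_eq!(format!("{}", one_fil), "1.0");
    assert_eq!(format!("{:.3}", TokenAmount::from_nano(1500)), "0.000");

    // There is no Div impl; only explicit div_rem / div_floor / div_ceil.
    let (q, r) = total.div_rem(2u64);
    assert_eq!(q * 2u64 + r, total);
}

The new-type intentionally omits From/Into BigInt, so call sites must say whether a raw integer means atto, nano, or whole FIL.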
+ pub const SYS_ILLEGAL_EXIT_CODE: ExitCode = ExitCode::new(9); + /// An internal VM assertion failed. + pub const SYS_ASSERTION_FAILED: ExitCode = ExitCode::new(10); + /// The actor returned a block handle that doesn't exist + pub const SYS_MISSING_RETURN: ExitCode = ExitCode::new(11); + // pub const SYS_RESERVED_12: ExitCode = ExitCode::new(12); + // pub const SYS_RESERVED_13: ExitCode = ExitCode::new(13); + // pub const SYS_RESERVED_14: ExitCode = ExitCode::new(14); + // pub const SYS_RESERVED_15: ExitCode = ExitCode::new(15); + + /// The lowest exit code that an actor may abort with. + pub const FIRST_USER_EXIT_CODE: u32 = 16; + + // Standard exit codes according to the built-in actors' calling convention. + /// The method parameters are invalid. + pub const USR_ILLEGAL_ARGUMENT: ExitCode = ExitCode::new(16); + /// The requested resource does not exist. + pub const USR_NOT_FOUND: ExitCode = ExitCode::new(17); + /// The requested operation is forbidden. + pub const USR_FORBIDDEN: ExitCode = ExitCode::new(18); + /// The actor has insufficient funds to perform the requested operation. + pub const USR_INSUFFICIENT_FUNDS: ExitCode = ExitCode::new(19); + /// The actor's internal state is invalid. + pub const USR_ILLEGAL_STATE: ExitCode = ExitCode::new(20); + /// There was a de/serialization failure within actor code. + pub const USR_SERIALIZATION: ExitCode = ExitCode::new(21); + /// The message cannot be handled (usually indicates an unhandled method number). + pub const USR_UNHANDLED_MESSAGE: ExitCode = ExitCode::new(22); + /// The actor failed with an unspecified error. + pub const USR_UNSPECIFIED: ExitCode = ExitCode::new(23); + /// The actor failed a user-level assertion. + pub const USR_ASSERTION_FAILED: ExitCode = ExitCode::new(24); + /// The requested operation cannot be performed in "read-only" mode. + pub const USR_READ_ONLY: ExitCode = ExitCode::new(25); + /// The method cannot handle a transfer of value. + pub const USR_NOT_PAYABLE: ExitCode = ExitCode::new(26); + // pub const RESERVED_27: ExitCode = ExitCode::new(27); + // pub const RESERVED_28: ExitCode = ExitCode::new(28); + // pub const RESERVED_29: ExitCode = ExitCode::new(29); + // pub const RESERVED_30: ExitCode = ExitCode::new(30); + // pub const RESERVED_31: ExitCode = ExitCode::new(31); +} + +/// When a syscall fails, it returns an `ErrorNumber` to indicate why. The syscalls themselves +/// include documentation on _which_ syscall errors they can be expected to return, and what they +/// mean in the context of the syscall. +#[non_exhaustive] +#[repr(u32)] +#[derive(Copy, Clone, Eq, Debug, PartialEq, Error, FromPrimitive)] +pub enum ErrorNumber { + /// A syscall parameters was invalid. + IllegalArgument = 1, + /// The actor is not in the correct state to perform the requested operation. + IllegalOperation = 2, + /// This syscall would exceed some system limit (memory, lookback, call depth, etc.). + LimitExceeded = 3, + /// A system-level assertion has failed. + /// + /// # Note + /// + /// Non-system actors should never receive this error number. A system-level assertion will + /// cause the entire message to fail. + AssertionFailed = 4, + /// There were insufficient funds to complete the requested operation. + InsufficientFunds = 5, + /// A resource was not found. + NotFound = 6, + /// The specified IPLD block handle was invalid. + InvalidHandle = 7, + /// The requested CID shape (multihash codec, multihash length) isn't supported. + IllegalCid = 8, + /// The requested IPLD codec isn't supported. 
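A small sketch (not part of the patch) of how callers are expected to classify the ExitCode values defined above: everything below FIRST_USER_EXIT_CODE is reserved for the VM, the rest is actor-defined. It assumes the vendored crate is addressed as `fvm_shared`:

use fvm_shared::error::ExitCode;

fn describe(code: ExitCode) -> &'static str {
    if code.is_success() {
        "success"
    } else if code.is_system_error() {
        "reserved VM exit code"
    } else {
        "actor-defined exit code"
    }
}

fn main() {
    assert_eq!(describe(ExitCode::OK), "success");
    assert_eq!(describe(ExitCode::SYS_OUT_OF_GAS), "reserved VM exit code");
    assert_eq!(describe(ExitCode::USR_NOT_FOUND), "actor-defined exit code");
    assert_eq!(describe(ExitCode::new(42)), "actor-defined exit code");
}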
+ IllegalCodec = 9, + /// The IPLD block did not match the specified IPLD codec. + Serialization = 10, + /// The operation is forbidden. + Forbidden = 11, + /// The passed buffer is too small. + BufferTooSmall = 12, + /// The actor is executing in a read-only context. + ReadOnly = 13, +} + +impl std::fmt::Display for ErrorNumber { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + use ErrorNumber::*; + f.write_str(match *self { + IllegalArgument => "illegal argument", + IllegalOperation => "illegal operation", + LimitExceeded => "limit exceeded", + AssertionFailed => "filecoin assertion failed", + InsufficientFunds => "insufficient funds", + NotFound => "resource not found", + InvalidHandle => "invalid ipld block handle", + IllegalCid => "illegal cid specification", + IllegalCodec => "illegal ipld codec", + Serialization => "serialization error", + Forbidden => "operation forbidden", + BufferTooSmall => "buffer too small", + ReadOnly => "execution context is read-only", + }) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/event/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/event/mod.rs new file mode 100644 index 000000000000..ad983d7e570a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/event/mod.rs @@ -0,0 +1,63 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use bitflags::bitflags; +use fvm_ipld_encoding::strict_bytes; +use serde::{Deserialize, Serialize}; +use serde_tuple::*; + +use crate::ActorID; + +/// Event with extra information stamped by the FVM. This is the structure that gets committed +/// on-chain via the receipt. +#[derive(Serialize_tuple, Deserialize_tuple, PartialEq, Eq, Clone, Debug)] +pub struct StampedEvent { + /// Carries the ID of the actor that emitted this event. + pub emitter: ActorID, + /// The event as emitted by the actor. + pub event: ActorEvent, +} + +impl StampedEvent { + pub fn new(emitter: ActorID, event: ActorEvent) -> Self { + Self { emitter, event } + } +} + +/// An event as originally emitted by the actor. +#[derive(Serialize_tuple, Deserialize_tuple, PartialEq, Eq, Clone, Debug)] +#[serde(transparent)] +pub struct ActorEvent { + pub entries: Vec, +} + +impl From> for ActorEvent { + fn from(entries: Vec) -> Self { + Self { entries } + } +} + +bitflags! { + /// Flags associated with an Event entry. + #[derive(Deserialize, Serialize, Copy, Clone, Eq, PartialEq, Debug)] + #[repr(transparent)] // we pass this type through a syscall + #[serde(transparent)] + pub struct Flags: u64 { + const FLAG_INDEXED_KEY = 0b00000001; + const FLAG_INDEXED_VALUE = 0b00000010; + const FLAG_INDEXED_ALL = Self::FLAG_INDEXED_KEY.bits() | Self::FLAG_INDEXED_VALUE.bits(); + } +} + +/// A key value entry inside an Event. +#[derive(Serialize_tuple, Deserialize_tuple, PartialEq, Eq, Clone, Debug)] +pub struct Entry { + /// A bitmap conveying metadata or hints about this entry. + pub flags: Flags, + /// The key of this event. + pub key: String, + /// The value's codec. Must be IPLD_RAW (0x55) for now according to FIP-0049. + pub codec: u64, + /// The event's value. 
+ #[serde(with = "strict_bytes")] + pub value: Vec, +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/lib.rs new file mode 100644 index 000000000000..455110c60e50 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/lib.rs @@ -0,0 +1,162 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +#![cfg_attr(coverage_nightly, feature(coverage_attribute))] + +#[macro_use] +extern crate lazy_static; + +use address::Address; +use cid::Cid; +use clock::ChainEpoch; + +pub mod address; +pub mod bigint; +pub mod chainid; +pub mod clock; +pub mod commcid; +pub mod consensus; +pub mod crypto; +pub mod deal; +pub mod econ; +pub mod error; +pub mod event; +pub mod math; +pub mod message; +pub mod piece; +pub mod randomness; +pub mod receipt; +pub mod reward; +pub mod sector; +pub mod smooth; +pub mod state; +pub mod sys; +pub mod upgrade; +pub mod version; + +use cid::multihash::Multihash; +use crypto::hash::SupportedHashes; +use econ::TokenAmount; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_ipld_encoding::DAG_CBOR; + +use crate::error::ExitCode; + +lazy_static! { + /// Total Filecoin available to the network. + pub static ref TOTAL_FILECOIN: TokenAmount = TokenAmount::from_whole(TOTAL_FILECOIN_BASE); + + /// Zero address used to avoid allowing it to be used for verification. + /// This is intentionally disallowed because it is an edge case with Filecoin's BLS + /// signature verification. + pub static ref ZERO_ADDRESS: Address = address::Network::Mainnet.parse_address("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a").unwrap(); +} + +/// Codec for raw data. +pub const IPLD_RAW: u64 = 0x55; + +/// Multihash code for the identity hash function. +pub const IDENTITY_HASH: u64 = 0x0; + +/// The maximum supported CID size. +pub const MAX_CID_LEN: usize = 100; + +/// Identifier for Actors, includes builtin and initialized actors +pub type ActorID = u64; + +/// Default bit width for the hamt in the filecoin protocol. +pub const HAMT_BIT_WIDTH: u32 = 5; +/// Total gas limit allowed per block. This is shared across networks. +pub const BLOCK_GAS_LIMIT: u64 = 10_000_000_000; +/// Total Filecoin supply. +pub const TOTAL_FILECOIN_BASE: i64 = 2_000_000_000; + +// Epochs +/// Lookback height for retrieving ticket randomness. +pub const TICKET_RANDOMNESS_LOOKBACK: ChainEpoch = 1; +/// Epochs to look back for verifying PoSt proofs. +pub const WINNING_POST_SECTOR_SET_LOOKBACK: ChainEpoch = 10; + +/// The expected number of block producers in each epoch. +pub const BLOCKS_PER_EPOCH: u64 = 5; + +/// Allowable clock drift in validations. +pub const ALLOWABLE_CLOCK_DRIFT: u64 = 1; + +/// Config trait which handles different network configurations. +pub trait NetworkParams { + /// Total filecoin available to network. + const TOTAL_FILECOIN: i64; + + /// Available rewards for mining. + const MINING_REWARD_TOTAL: i64; + + /// Initial reward actor balance. This function is only called in genesis setting up state. + fn initial_reward_balance() -> TokenAmount { + TokenAmount::from_whole(Self::MINING_REWARD_TOTAL) + } +} + +/// Params for the network. This is now continued on into mainnet and is static across networks. 
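To show how the event types above fit together (illustrative only, not part of the patch), the following builds a single-entry ActorEvent and stamps it with an emitter ID. IPLD_RAW is the 0x55 codec constant from the crate root; the emitter ID 1001 and the "balance" key are arbitrary:

use fvm_shared::event::{ActorEvent, Entry, Flags, StampedEvent};
use fvm_shared::IPLD_RAW;

fn main() {
    let entry = Entry {
        flags: Flags::FLAG_INDEXED_KEY | Flags::FLAG_INDEXED_VALUE,
        key: "balance".to_string(),
        codec: IPLD_RAW, // FIP-0049 currently only allows IPLD_RAW (0x55) here
        value: 1234u64.to_be_bytes().to_vec(),
    };
    let event: ActorEvent = vec![entry].into();
    let stamped = StampedEvent::new(1001, event);
    assert_eq!(stamped.emitter, 1001);
    assert_eq!(stamped.event.entries.len(), 1);
}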
+// * This can be removed in the future if the new testnet is configred at build time +// * but the reason to keep as is, is for an easier transition to runtime configuration. +pub struct DefaultNetworkParams; + +impl NetworkParams for DefaultNetworkParams { + const TOTAL_FILECOIN: i64 = TOTAL_FILECOIN_BASE; + const MINING_REWARD_TOTAL: i64 = 1_400_000_000; +} + +/// Method number indicator for calling actor methods. +pub type MethodNum = u64; + +/// Base actor send method. +pub const METHOD_SEND: MethodNum = 0; +/// Base actor constructor method. +pub const METHOD_CONSTRUCTOR: MethodNum = 1; + +/// The outcome of a `Send`, covering its ExitCode and optional return data +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct Response { + pub exit_code: ExitCode, + pub return_data: Option, +} + +// This is a somewhat nasty hack that lets us unwrap in a const function. +const fn const_unwrap(r: Result) -> T { + let v = match r { + Ok(v) => v, + Err(_) => panic!(), // aborts at compile time + }; + // given the match above, this will _only_ drop `Ok(T)` where `T` is copy, so it won't actually + // do anything. However, we need it to convince the compiler that we never drop `Err(E)` because + // `E` likely isn't `Copy` (and therefore can't be "dropped" at compile time. + std::mem::forget(r); + v +} + +// 45b0cfc220ceec5b7c1c62c4d4193d38e4eba48e8815729ce75f9c0ab0e4c1c0 +const EMPTY_ARR_HASH_DIGEST: &[u8] = &[ + 0x45, 0xb0, 0xcf, 0xc2, 0x20, 0xce, 0xec, 0x5b, 0x7c, 0x1c, 0x62, 0xc4, 0xd4, 0x19, 0x3d, 0x38, + 0xe4, 0xeb, 0xa4, 0x8e, 0x88, 0x15, 0x72, 0x9c, 0xe7, 0x5f, 0x9c, 0x0a, 0xb0, 0xe4, 0xc1, 0xc0, +]; + +// bafy2bzacebc3bt6cedhoyw34drrmjvazhu4oj25er2ebk4u445pzycvq4ta4a +pub const EMPTY_ARR_CID: Cid = Cid::new_v1( + DAG_CBOR, + const_unwrap(Multihash::wrap( + SupportedHashes::Blake2b256 as u64, + EMPTY_ARR_HASH_DIGEST, + )), +); + +#[test] +fn test_empty_arr_cid() { + use fvm_ipld_encoding::to_vec; + use multihash_codetable::{Code, MultihashDigest}; + + let empty = to_vec::<[(); 0]>(&[]).unwrap(); + let expected = Cid::new_v1(DAG_CBOR, Code::Blake2b256.digest(&empty)); + assert_eq!(EMPTY_ARR_CID, expected); +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/math.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/math.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/math.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/math.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/message.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/message.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/message.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/message.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/piece/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/piece/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/piece/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/piece/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/piece/zero.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/piece/zero.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/piece/zero.rs rename to 
third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/piece/zero.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/randomness/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/randomness/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/randomness/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/randomness/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/receipt.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/receipt.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/receipt.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/receipt.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/reward.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/reward.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/reward.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/reward.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/post.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/post.rs similarity index 90% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/post.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/post.rs index a2b77e08ee1b..e9670bfa1655 100644 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/post.rs +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/post.rs @@ -40,11 +40,6 @@ impl quickcheck::Arbitrary for PoStProof { RegisteredPoStProof::StackedDRGWinning512MiBV1, RegisteredPoStProof::StackedDRGWinning32GiBV1, RegisteredPoStProof::StackedDRGWinning64GiBV1, - RegisteredPoStProof::StackedDRGWindow2KiBV1, - RegisteredPoStProof::StackedDRGWindow8MiBV1, - RegisteredPoStProof::StackedDRGWindow512MiBV1, - RegisteredPoStProof::StackedDRGWindow32GiBV1, - RegisteredPoStProof::StackedDRGWindow64GiBV1, RegisteredPoStProof::StackedDRGWindow2KiBV1P1, RegisteredPoStProof::StackedDRGWindow8MiBV1P1, RegisteredPoStProof::StackedDRGWindow512MiBV1P1, diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/registered_proof.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/registered_proof.rs new file mode 100644 index 000000000000..b685baeecd4a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/registered_proof.rs @@ -0,0 +1,586 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +#[cfg(feature = "proofs")] +use std::convert::TryFrom; + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +use super::SectorSize; +use crate::clock; +use crate::version::NetworkVersion; + +/// Seal proof type which defines the version and sector size. 
+#[allow(non_camel_case_types)] +#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] +pub enum RegisteredSealProof { + StackedDRG2KiBV1, + StackedDRG512MiBV1, + StackedDRG8MiBV1, + StackedDRG32GiBV1, + StackedDRG64GiBV1, + + StackedDRG2KiBV1P1, + StackedDRG512MiBV1P1, + StackedDRG8MiBV1P1, + StackedDRG32GiBV1P1, + StackedDRG64GiBV1P1, + + StackedDRG2KiBV1P1_Feat_SyntheticPoRep, + StackedDRG512MiBV1P1_Feat_SyntheticPoRep, + StackedDRG8MiBV1P1_Feat_SyntheticPoRep, + StackedDRG32GiBV1P1_Feat_SyntheticPoRep, + StackedDRG64GiBV1P1_Feat_SyntheticPoRep, + + StackedDRG2KiBV1P2_Feat_NiPoRep, + StackedDRG512MiBV1P2_Feat_NiPoRep, + StackedDRG8MiBV1P2_Feat_NiPoRep, + StackedDRG32GiBV1P2_Feat_NiPoRep, + StackedDRG64GiBV1P2_Feat_NiPoRep, + // TODO: get rid of this option once we no longer need go compat. + // We use it to ensure that we can deserialize bad values here because go checks this value + // later. + Invalid(i64), +} + +impl RegisteredSealProof { + /// Returns registered seal proof for given sector size + pub fn from_sector_size(size: SectorSize, network_version: NetworkVersion) -> Self { + if network_version < NetworkVersion::V7 { + match size { + SectorSize::_2KiB => Self::StackedDRG2KiBV1, + SectorSize::_8MiB => Self::StackedDRG8MiBV1, + SectorSize::_512MiB => Self::StackedDRG512MiBV1, + SectorSize::_32GiB => Self::StackedDRG32GiBV1, + SectorSize::_64GiB => Self::StackedDRG64GiBV1, + } + } else { + match size { + SectorSize::_2KiB => Self::StackedDRG2KiBV1P1, + SectorSize::_8MiB => Self::StackedDRG8MiBV1P1, + SectorSize::_512MiB => Self::StackedDRG512MiBV1P1, + SectorSize::_32GiB => Self::StackedDRG32GiBV1P1, + SectorSize::_64GiB => Self::StackedDRG64GiBV1P1, + } + } + } + + /// Convert the original proof type to the v1 proof added in network version 7. + pub fn update_to_v1(&mut self) { + *self = match self { + Self::StackedDRG2KiBV1 => Self::StackedDRG2KiBV1P1, + Self::StackedDRG512MiBV1 => Self::StackedDRG512MiBV1P1, + Self::StackedDRG8MiBV1 => Self::StackedDRG8MiBV1P1, + Self::StackedDRG32GiBV1 => Self::StackedDRG32GiBV1P1, + Self::StackedDRG64GiBV1 => Self::StackedDRG64GiBV1P1, + _ => return, + }; + } + + #[deprecated(since = "0.1.10", note = "Logic should exist in actors")] + /// The maximum duration a sector sealed with this proof may exist between activation and expiration. 
+ pub fn sector_maximum_lifetime(self) -> clock::ChainEpoch { + // For all Stacked DRG sectors, the max is 5 years + let epochs_per_year = 1_262_277; + 5 * epochs_per_year + } + + /// Proof size for each SealProof type + pub fn proof_size(self) -> Result { + use RegisteredSealProof::*; + match self { + StackedDRG2KiBV1 + | StackedDRG512MiBV1 + | StackedDRG8MiBV1 + | StackedDRG2KiBV1P1 + | StackedDRG512MiBV1P1 + | StackedDRG8MiBV1P1 + | StackedDRG2KiBV1P1_Feat_SyntheticPoRep + | StackedDRG512MiBV1P1_Feat_SyntheticPoRep + | StackedDRG8MiBV1P1_Feat_SyntheticPoRep => Ok(192), + + StackedDRG2KiBV1P2_Feat_NiPoRep + | StackedDRG512MiBV1P2_Feat_NiPoRep + | StackedDRG8MiBV1P2_Feat_NiPoRep => Ok(14_164), + + StackedDRG32GiBV1 + | StackedDRG64GiBV1 + | StackedDRG32GiBV1P1 + | StackedDRG64GiBV1P1 + | StackedDRG32GiBV1P1_Feat_SyntheticPoRep + | StackedDRG64GiBV1P1_Feat_SyntheticPoRep => Ok(1_920), + + StackedDRG32GiBV1P2_Feat_NiPoRep | StackedDRG64GiBV1P2_Feat_NiPoRep => Ok(23_092), + + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } +} + +impl Default for RegisteredSealProof { + fn default() -> Self { + Self::Invalid(-1) + } +} + +/// Proof of spacetime type, indicating version and sector size of the proof. +#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] +#[cfg_attr(feature = "arb", derive(arbitrary::Arbitrary))] +pub enum RegisteredPoStProof { + StackedDRGWinning2KiBV1, + StackedDRGWinning8MiBV1, + StackedDRGWinning512MiBV1, + StackedDRGWinning32GiBV1, + StackedDRGWinning64GiBV1, + StackedDRGWindow2KiBV1P1, + StackedDRGWindow8MiBV1P1, + StackedDRGWindow512MiBV1P1, + StackedDRGWindow32GiBV1P1, + StackedDRGWindow64GiBV1P1, + Invalid(i64), +} + +impl RegisteredPoStProof { + /// Returns the sector size of the proof type, which is measured in bytes. + pub fn sector_size(self) -> Result { + use RegisteredPoStProof::*; + match self { + StackedDRGWindow2KiBV1P1 | StackedDRGWinning2KiBV1 => Ok(SectorSize::_2KiB), + StackedDRGWindow8MiBV1P1 | StackedDRGWinning8MiBV1 => Ok(SectorSize::_8MiB), + StackedDRGWindow512MiBV1P1 | StackedDRGWinning512MiBV1 => Ok(SectorSize::_512MiB), + StackedDRGWindow32GiBV1P1 | StackedDRGWinning32GiBV1 => Ok(SectorSize::_32GiB), + StackedDRGWindow64GiBV1P1 | StackedDRGWinning64GiBV1 => Ok(SectorSize::_64GiB), + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } + + /// Proof size for each PoStProof type + pub fn proof_size(self) -> Result { + use RegisteredPoStProof::*; + match self { + StackedDRGWinning2KiBV1 + | StackedDRGWinning8MiBV1 + | StackedDRGWinning512MiBV1 + | StackedDRGWinning32GiBV1 + | StackedDRGWinning64GiBV1 + | StackedDRGWindow2KiBV1P1 + | StackedDRGWindow8MiBV1P1 + | StackedDRGWindow512MiBV1P1 + | StackedDRGWindow32GiBV1P1 + | StackedDRGWindow64GiBV1P1 => Ok(192), + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } + /// Returns the partition size, in sectors, associated with a proof type. + /// The partition size is the number of sectors proven in a single PoSt proof. + pub fn window_post_partitions_sector(self) -> Result { + // Resolve to post proof and then compute size from that. 
+ use RegisteredPoStProof::*; + match self { + StackedDRGWinning64GiBV1 | StackedDRGWindow64GiBV1P1 => Ok(2300), + StackedDRGWinning32GiBV1 | StackedDRGWindow32GiBV1P1 => Ok(2349), + StackedDRGWinning2KiBV1 | StackedDRGWindow2KiBV1P1 => Ok(2), + StackedDRGWinning8MiBV1 | StackedDRGWindow8MiBV1P1 => Ok(2), + StackedDRGWinning512MiBV1 | StackedDRGWindow512MiBV1P1 => Ok(2), + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } +} + +impl RegisteredSealProof { + /// Returns the sector size of the proof type, which is measured in bytes. + pub fn sector_size(self) -> Result { + use RegisteredSealProof::*; + match self { + StackedDRG2KiBV1 + | StackedDRG2KiBV1P1 + | StackedDRG2KiBV1P1_Feat_SyntheticPoRep + | StackedDRG2KiBV1P2_Feat_NiPoRep => Ok(SectorSize::_2KiB), + StackedDRG8MiBV1 + | StackedDRG8MiBV1P1 + | StackedDRG8MiBV1P1_Feat_SyntheticPoRep + | StackedDRG8MiBV1P2_Feat_NiPoRep => Ok(SectorSize::_8MiB), + StackedDRG512MiBV1 + | StackedDRG512MiBV1P1 + | StackedDRG512MiBV1P1_Feat_SyntheticPoRep + | StackedDRG512MiBV1P2_Feat_NiPoRep => Ok(SectorSize::_512MiB), + StackedDRG32GiBV1 + | StackedDRG32GiBV1P1 + | StackedDRG32GiBV1P1_Feat_SyntheticPoRep + | StackedDRG32GiBV1P2_Feat_NiPoRep => Ok(SectorSize::_32GiB), + StackedDRG64GiBV1 + | StackedDRG64GiBV1P1 + | StackedDRG64GiBV1P1_Feat_SyntheticPoRep + | StackedDRG64GiBV1P2_Feat_NiPoRep => Ok(SectorSize::_64GiB), + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } + + /// Returns the partition size, in sectors, associated with a proof type. + /// The partition size is the number of sectors proven in a single PoSt proof. + pub fn window_post_partitions_sector(self) -> Result { + // Resolve to seal proof and then compute size from that. + use RegisteredSealProof::*; + match self { + StackedDRG64GiBV1 + | StackedDRG64GiBV1P1 + | StackedDRG64GiBV1P1_Feat_SyntheticPoRep + | StackedDRG64GiBV1P2_Feat_NiPoRep => Ok(2300), + StackedDRG32GiBV1 + | StackedDRG32GiBV1P1 + | StackedDRG32GiBV1P1_Feat_SyntheticPoRep + | StackedDRG32GiBV1P2_Feat_NiPoRep => Ok(2349), + StackedDRG2KiBV1 + | StackedDRG2KiBV1P1 + | StackedDRG2KiBV1P1_Feat_SyntheticPoRep + | StackedDRG2KiBV1P2_Feat_NiPoRep => Ok(2), + StackedDRG8MiBV1 + | StackedDRG8MiBV1P1 + | StackedDRG8MiBV1P1_Feat_SyntheticPoRep + | StackedDRG8MiBV1P2_Feat_NiPoRep => Ok(2), + StackedDRG512MiBV1 + | StackedDRG512MiBV1P1 + | StackedDRG512MiBV1P1_Feat_SyntheticPoRep + | StackedDRG512MiBV1P2_Feat_NiPoRep => Ok(2), + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } + + /// Produces the windowed PoSt-specific RegisteredProof corresponding + /// to the receiving RegisteredProof. 
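For reference (not part of the patch), a sketch of how the seal and PoSt proof metadata above, together with the window-PoSt mapping defined just below, is typically consumed. It assumes the usual `fvm_shared::sector` re-exports and that the error type in these Results is String, as the visible match arms suggest:

use fvm_shared::sector::{RegisteredPoStProof, RegisteredSealProof, SectorSize};

fn main() {
    let seal = RegisteredSealProof::StackedDRG32GiBV1P1;
    assert_eq!(seal.sector_size(), Ok(SectorSize::_32GiB));
    assert_eq!(seal.proof_size(), Ok(1_920));

    // Map the seal proof to its window PoSt counterpart and query its partition size.
    let post = seal.registered_window_post_proof().unwrap();
    assert_eq!(post, RegisteredPoStProof::StackedDRGWindow32GiBV1P1);
    assert_eq!(post.window_post_partitions_sector(), Ok(2349));
}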
+ pub fn registered_window_post_proof(self) -> Result { + use RegisteredPoStProof::*; + match self { + Self::StackedDRG64GiBV1 + | Self::StackedDRG64GiBV1P1 + | Self::StackedDRG64GiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG64GiBV1P2_Feat_NiPoRep => Ok(StackedDRGWindow64GiBV1P1), + Self::StackedDRG32GiBV1 + | Self::StackedDRG32GiBV1P1 + | Self::StackedDRG32GiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG32GiBV1P2_Feat_NiPoRep => Ok(StackedDRGWindow32GiBV1P1), + Self::StackedDRG2KiBV1 + | Self::StackedDRG2KiBV1P1 + | Self::StackedDRG2KiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG2KiBV1P2_Feat_NiPoRep => Ok(StackedDRGWindow2KiBV1P1), + Self::StackedDRG8MiBV1 + | Self::StackedDRG8MiBV1P1 + | Self::StackedDRG8MiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG8MiBV1P2_Feat_NiPoRep => Ok(StackedDRGWindow8MiBV1P1), + Self::StackedDRG512MiBV1 + | Self::StackedDRG512MiBV1P1 + | Self::StackedDRG512MiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG512MiBV1P2_Feat_NiPoRep => Ok(StackedDRGWindow512MiBV1P1), + Self::Invalid(_) => Err(format!( + "Unsupported mapping from {:?} to PoSt-window RegisteredProof", + self + )), + } + } + + /// Produces the update RegisteredProof corresponding to the receiving RegisteredProof. + pub fn registered_update_proof(self) -> Result { + use RegisteredUpdateProof::*; + match self { + Self::StackedDRG64GiBV1 + | Self::StackedDRG64GiBV1P1 + | Self::StackedDRG64GiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG64GiBV1P2_Feat_NiPoRep => Ok(StackedDRG64GiBV1), + Self::StackedDRG32GiBV1 + | Self::StackedDRG32GiBV1P1 + | Self::StackedDRG32GiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG32GiBV1P2_Feat_NiPoRep => Ok(StackedDRG32GiBV1), + Self::StackedDRG2KiBV1 + | Self::StackedDRG2KiBV1P1 + | Self::StackedDRG2KiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG2KiBV1P2_Feat_NiPoRep => Ok(StackedDRG2KiBV1), + Self::StackedDRG8MiBV1 + | Self::StackedDRG8MiBV1P1 + | Self::StackedDRG8MiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG8MiBV1P2_Feat_NiPoRep => Ok(StackedDRG8MiBV1), + Self::StackedDRG512MiBV1 + | Self::StackedDRG512MiBV1P1 + | Self::StackedDRG512MiBV1P1_Feat_SyntheticPoRep + | Self::StackedDRG512MiBV1P2_Feat_NiPoRep => Ok(StackedDRG512MiBV1), + Self::Invalid(_) => Err(format!( + "Unsupported mapping from {:?} to Update RegisteredProof", + self + )), + } + } +} + +/// Seal proof type which defines the version and sector size. +#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] +pub enum RegisteredAggregateProof { + SnarkPackV1, + SnarkPackV2, + Invalid(i64), +} + +/// Proof of update type +#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] +pub enum RegisteredUpdateProof { + StackedDRG2KiBV1, + StackedDRG8MiBV1, + StackedDRG512MiBV1, + StackedDRG32GiBV1, + StackedDRG64GiBV1, + Invalid(i64), +} + +macro_rules! i64_conversion { + ($ty:ident; $( $var:ident => $val:expr, )*) => { + impl From for $ty { + fn from(value: i64) -> Self { + match value { + $( $val => $ty::$var, )* + other => $ty::Invalid(other), + } + } + } + impl From<$ty> for i64 { + fn from(proof: $ty) -> Self { + match proof { + $( $ty::$var => $val, )* + $ty::Invalid(other) => other, + } + } + } + } +} + +i64_conversion! { + RegisteredPoStProof; + StackedDRGWinning2KiBV1 => 0, + StackedDRGWinning8MiBV1 => 1, + StackedDRGWinning512MiBV1 => 2, + StackedDRGWinning32GiBV1 => 3, + StackedDRGWinning64GiBV1 => 4, + // The hole here is due to the removal of v1 PoSt proof types. + // We can not modify those numbers since they are consensus-critical. 
+ StackedDRGWindow2KiBV1P1 => 10, + StackedDRGWindow8MiBV1P1 => 11, + StackedDRGWindow512MiBV1P1 => 12, + StackedDRGWindow32GiBV1P1 => 13, + StackedDRGWindow64GiBV1P1 => 14, +} + +i64_conversion! { + RegisteredSealProof; + StackedDRG2KiBV1 => 0, + StackedDRG8MiBV1 => 1, + StackedDRG512MiBV1 => 2, + StackedDRG32GiBV1 => 3, + StackedDRG64GiBV1 => 4, + + StackedDRG2KiBV1P1 => 5, + StackedDRG8MiBV1P1 => 6, + StackedDRG512MiBV1P1 => 7, + StackedDRG32GiBV1P1 => 8, + StackedDRG64GiBV1P1 => 9, + + StackedDRG2KiBV1P1_Feat_SyntheticPoRep => 10, + StackedDRG8MiBV1P1_Feat_SyntheticPoRep => 11, + StackedDRG512MiBV1P1_Feat_SyntheticPoRep => 12, + StackedDRG32GiBV1P1_Feat_SyntheticPoRep => 13, + StackedDRG64GiBV1P1_Feat_SyntheticPoRep => 14, + + StackedDRG2KiBV1P2_Feat_NiPoRep => 15, + StackedDRG8MiBV1P2_Feat_NiPoRep => 16, + StackedDRG512MiBV1P2_Feat_NiPoRep => 17, + StackedDRG32GiBV1P2_Feat_NiPoRep => 18, + StackedDRG64GiBV1P2_Feat_NiPoRep => 19, +} + +i64_conversion! { + RegisteredAggregateProof; + SnarkPackV1 => 0, + SnarkPackV2 => 1, +} + +i64_conversion! { + RegisteredUpdateProof; + StackedDRG2KiBV1 => 0, + StackedDRG8MiBV1 => 1, + StackedDRG512MiBV1 => 2, + StackedDRG32GiBV1 => 3, + StackedDRG64GiBV1 => 4, +} +#[cfg(feature = "proofs")] +impl TryFrom for filecoin_proofs_api::RegisteredAggregationProof { + type Error = String; + fn try_from(p: RegisteredAggregateProof) -> Result { + use RegisteredAggregateProof::*; + match p { + SnarkPackV1 => Ok(Self::SnarkPackV1), + SnarkPackV2 => Ok(Self::SnarkPackV2), + Invalid(i) => Err(format!("unsupported aggregate proof type: {}", i)), + } + } +} + +#[cfg(feature = "proofs")] +impl TryFrom for filecoin_proofs_api::RegisteredSealProof { + type Error = String; + fn try_from(p: RegisteredSealProof) -> Result { + use RegisteredSealProof::*; + match p { + StackedDRG64GiBV1 => Ok(Self::StackedDrg64GiBV1), + StackedDRG32GiBV1 => Ok(Self::StackedDrg32GiBV1), + StackedDRG2KiBV1 => Ok(Self::StackedDrg2KiBV1), + StackedDRG8MiBV1 => Ok(Self::StackedDrg8MiBV1), + StackedDRG512MiBV1 => Ok(Self::StackedDrg512MiBV1), + StackedDRG64GiBV1P1 => Ok(Self::StackedDrg64GiBV1_1), + StackedDRG32GiBV1P1 => Ok(Self::StackedDrg32GiBV1_1), + StackedDRG2KiBV1P1 => Ok(Self::StackedDrg2KiBV1_1), + StackedDRG8MiBV1P1 => Ok(Self::StackedDrg8MiBV1_1), + StackedDRG512MiBV1P1 => Ok(Self::StackedDrg512MiBV1_1), + StackedDRG64GiBV1P1_Feat_SyntheticPoRep => { + Ok(Self::StackedDrg64GiBV1_1_Feat_SyntheticPoRep) + } + StackedDRG32GiBV1P1_Feat_SyntheticPoRep => { + Ok(Self::StackedDrg32GiBV1_1_Feat_SyntheticPoRep) + } + StackedDRG2KiBV1P1_Feat_SyntheticPoRep => { + Ok(Self::StackedDrg2KiBV1_1_Feat_SyntheticPoRep) + } + StackedDRG8MiBV1P1_Feat_SyntheticPoRep => { + Ok(Self::StackedDrg8MiBV1_1_Feat_SyntheticPoRep) + } + StackedDRG512MiBV1P1_Feat_SyntheticPoRep => { + Ok(Self::StackedDrg512MiBV1_1_Feat_SyntheticPoRep) + } + StackedDRG2KiBV1P2_Feat_NiPoRep => { + Ok(Self::StackedDrg2KiBV1_2_Feat_NonInteractivePoRep) + } + StackedDRG512MiBV1P2_Feat_NiPoRep => { + Ok(Self::StackedDrg512MiBV1_2_Feat_NonInteractivePoRep) + } + StackedDRG8MiBV1P2_Feat_NiPoRep => { + Ok(Self::StackedDrg8MiBV1_2_Feat_NonInteractivePoRep) + } + StackedDRG32GiBV1P2_Feat_NiPoRep => { + Ok(Self::StackedDrg32GiBV1_2_Feat_NonInteractivePoRep) + } + StackedDRG64GiBV1P2_Feat_NiPoRep => { + Ok(Self::StackedDrg64GiBV1_2_Feat_NonInteractivePoRep) + } + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } +} + +#[cfg(feature = "proofs")] +impl TryFrom for filecoin_proofs_api::RegisteredPoStProof { + type Error = String; + 
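A sketch (not part of the patch) of the i64 round-trips generated by `i64_conversion!` above. The 5..=9 slots freed by the removed StackedDRGWindow*V1 proofs stay reserved because the numbering is consensus-critical, and unknown values are carried as Invalid(_) so they can be rejected later rather than failing deserialization:

use fvm_shared::sector::{RegisteredPoStProof, RegisteredSealProof};

fn main() {
    assert_eq!(i64::from(RegisteredPoStProof::StackedDRGWindow32GiBV1P1), 13);
    assert_eq!(RegisteredPoStProof::from(13), RegisteredPoStProof::StackedDRGWindow32GiBV1P1);

    // Numbers in the 5..=9 hole (the removed V1 window proofs) come back as Invalid.
    assert_eq!(RegisteredPoStProof::from(7), RegisteredPoStProof::Invalid(7));

    // The new NI-PoRep seal proofs occupy 15..=19.
    assert_eq!(i64::from(RegisteredSealProof::StackedDRG32GiBV1P2_Feat_NiPoRep), 18);
}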
fn try_from(p: RegisteredPoStProof) -> Result { + use RegisteredPoStProof::*; + match p { + StackedDRGWinning2KiBV1 => Ok(Self::StackedDrgWinning2KiBV1), + StackedDRGWinning8MiBV1 => Ok(Self::StackedDrgWinning8MiBV1), + StackedDRGWinning512MiBV1 => Ok(Self::StackedDrgWinning512MiBV1), + StackedDRGWinning32GiBV1 => Ok(Self::StackedDrgWinning32GiBV1), + StackedDRGWinning64GiBV1 => Ok(Self::StackedDrgWinning64GiBV1), + StackedDRGWindow2KiBV1P1 => Ok(Self::StackedDrgWindow2KiBV1_2), + StackedDRGWindow8MiBV1P1 => Ok(Self::StackedDrgWindow8MiBV1_2), + StackedDRGWindow512MiBV1P1 => Ok(Self::StackedDrgWindow512MiBV1_2), + StackedDRGWindow32GiBV1P1 => Ok(Self::StackedDrgWindow32GiBV1_2), + StackedDRGWindow64GiBV1P1 => Ok(Self::StackedDrgWindow64GiBV1_2), + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } +} + +#[cfg(feature = "proofs")] +impl TryFrom for filecoin_proofs_api::RegisteredUpdateProof { + type Error = String; + fn try_from(p: RegisteredUpdateProof) -> Result { + use RegisteredUpdateProof::*; + match p { + StackedDRG2KiBV1 => Ok(Self::StackedDrg2KiBV1), + StackedDRG8MiBV1 => Ok(Self::StackedDrg8MiBV1), + StackedDRG512MiBV1 => Ok(Self::StackedDrg512MiBV1), + StackedDRG32GiBV1 => Ok(Self::StackedDrg32GiBV1), + StackedDRG64GiBV1 => Ok(Self::StackedDrg64GiBV1), + Invalid(i) => Err(format!("unsupported proof type: {}", i)), + } + } +} + +impl Serialize for RegisteredPoStProof { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + i64::from(*self).serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for RegisteredPoStProof { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let val = i64::deserialize(deserializer)?; + Ok(Self::from(val)) + } +} + +impl Serialize for RegisteredSealProof { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + i64::from(*self).serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for RegisteredSealProof { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let val = i64::deserialize(deserializer)?; + Ok(Self::from(val)) + } +} + +impl Serialize for RegisteredAggregateProof { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + i64::from(*self).serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for RegisteredAggregateProof { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let val = i64::deserialize(deserializer)?; + Ok(Self::from(val)) + } +} + +impl Serialize for RegisteredUpdateProof { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + i64::from(*self).serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for RegisteredUpdateProof { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let val = i64::deserialize(deserializer)?; + Ok(Self::from(val)) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/seal.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/seal.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sector/seal.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sector/seal.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/smooth/alpha_beta_filter.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/smooth/alpha_beta_filter.rs similarity index 100% rename from 
third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/smooth/alpha_beta_filter.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/smooth/alpha_beta_filter.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/smooth/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/smooth/mod.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/smooth/mod.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/smooth/mod.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/smooth/smooth_func.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/smooth/smooth_func.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/smooth/smooth_func.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/smooth/smooth_func.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/state/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/state/mod.rs new file mode 100644 index 000000000000..498d63eec50b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/state/mod.rs @@ -0,0 +1,149 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_encoding::repr::*; +use fvm_ipld_encoding::tuple::*; +use num_traits::Zero; +#[cfg(feature = "arb")] +use quickcheck::Arbitrary; +use serde::{Deserialize, Serialize}; + +use crate::address::Address; +use crate::econ::TokenAmount; +use crate::EMPTY_ARR_CID; + +/// Specifies the version of the state tree +#[derive(Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Serialize_repr, Deserialize_repr)] +#[repr(u64)] +pub enum StateTreeVersion { + /// Corresponds to actors < v2 + V0, + /// Corresponds to actors = v2 + V1, + /// Corresponds to actors = v3 + V2, + /// Corresponds to actors = v4 + V3, + /// Corresponds to actors >= v5 + V4, + /// Corresponding to actors >= v10 + V5, +} + +/// State root information. Contains information about the version of the state tree, +/// the root of the tree, and a link to the information about the tree. +#[derive(Deserialize_tuple, Serialize_tuple)] +pub struct StateRoot { + /// State tree version + pub version: StateTreeVersion, + + /// Actors tree. The structure depends on the state root version. + pub actors: Cid, + + /// Info. The structure depends on the state root version. + pub info: Cid, +} + +/// Empty state tree information. This is serialized as an array for future proofing. +#[derive(Default, Deserialize, Serialize)] +#[serde(transparent)] +pub struct StateInfo0([(); 0]); + +/// State of all actor implementations. +#[derive(PartialEq, Eq, Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ActorState { + /// Link to code for the actor. + pub code: Cid, + /// Link to the state of the actor. + pub state: Cid, + /// Sequence of the actor. + pub sequence: u64, + /// Tokens available to the actor. + pub balance: TokenAmount, + /// The actor's "delegated" address, if assigned. + /// + /// This field is set on actor creation and never modified. + pub delegated_address: Option
<Address>, +} + +/// Error returned when attempting to deduct funds with an insufficient balance. +#[derive(thiserror::Error, Debug)] +pub enum InvalidTransfer { + #[error("insufficient funds when deducting funds ({amount}) from balance ({balance})")] + InsufficientBalance { + amount: TokenAmount, + balance: TokenAmount, + }, + #[error("cannot deposit/deduct a negative amount of funds ({0})")] + NegativeAmount(TokenAmount), +} + +impl ActorState { + /// Constructor for actor state + pub fn new( + code: Cid, + state: Cid, + balance: TokenAmount, + sequence: u64, + address: Option<Address>,
) -> Self { + Self { + code, + state, + sequence, + balance, + delegated_address: address, + } + } + + /// Construct a new empty actor with the specified code. + pub fn new_empty(code: Cid, delegated_address: Option<Address>
) -> Self { + ActorState { + code, + state: EMPTY_ARR_CID, + sequence: 0, + balance: TokenAmount::zero(), + delegated_address, + } + } + + /// Safely deducts funds from an Actor + pub fn deduct_funds(&mut self, amt: &TokenAmount) -> Result<(), InvalidTransfer> { + if amt.is_negative() { + return Err(InvalidTransfer::NegativeAmount(amt.clone())); + } + if &self.balance < amt { + return Err(InvalidTransfer::InsufficientBalance { + amount: amt.clone(), + balance: self.balance.clone(), + }); + } + self.balance -= amt; + + Ok(()) + } + /// Deposits funds to an Actor + pub fn deposit_funds(&mut self, amt: &TokenAmount) -> Result<(), InvalidTransfer> { + if amt.is_negative() { + Err(InvalidTransfer::NegativeAmount(amt.clone())) + } else { + self.balance += amt; + Ok(()) + } + } +} + +#[cfg(feature = "arb")] +impl Arbitrary for ActorState { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + Self { + code: Cid::arbitrary(g), + state: Cid::arbitrary(g), + sequence: u64::arbitrary(g), + balance: TokenAmount::arbitrary(g), + delegated_address: Option::arbitrary(g), + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sys/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sys/mod.rs new file mode 100644 index 000000000000..9529b8a3642e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sys/mod.rs @@ -0,0 +1,114 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! This module contains types exchanged at the syscall layer between actors +//! (usually through the SDK) and the FVM. + +use bitflags::bitflags; +use num_bigint::TryFromBigIntError; + +pub mod out; + +pub type BlockId = u32; +pub type Codec = u64; + +/// The token amount type used in syscalls. It can represent any token amount (in atto-FIL) from 0 +/// to `2^128-1` attoFIL. Or 0 to about 340 exaFIL. +/// +/// Internally, this type is a tuple of `u64`s storing the "low" and "high" bits of a little-endian +/// u128. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(packed, C)] +pub struct TokenAmount { + pub lo: u64, + pub hi: u64, +} + +impl From for crate::econ::TokenAmount { + fn from(v: TokenAmount) -> Self { + crate::econ::TokenAmount::from_atto((v.hi as u128) << 64 | (v.lo as u128)) + } +} + +impl TryFrom for TokenAmount { + type Error = TryFromBigIntError<()>; + fn try_from(v: crate::econ::TokenAmount) -> Result { + v.atto().try_into().map(|v: u128| Self { + hi: (v >> u64::BITS) as u64, + lo: v as u64, + }) + } +} + +impl<'a> TryFrom<&'a crate::econ::TokenAmount> for TokenAmount { + type Error = TryFromBigIntError<()>; + fn try_from(v: &'a crate::econ::TokenAmount) -> Result { + v.atto().try_into().map(|v: u128| Self { + hi: (v >> u64::BITS) as u64, + lo: v as u64, + }) + } +} + +bitflags! { + /// Flags passed to the send syscall. + #[derive(Default, Copy, Clone, Eq, PartialEq, Debug)] + #[repr(transparent)] + // note: this is 64 bits because I don't want to hate my past self, not because we need them + // right now. It doesn't really cost anything anyways. + pub struct SendFlags: u64 { + /// Send in "read-only" mode. + const READ_ONLY = 0b00000001; + } +} + +impl SendFlags { + pub fn read_only(self) -> bool { + self.intersects(Self::READ_ONLY) + } +} + +/// A fixed sized struct for serializing an [event `Entry`](crate::event::Entry) separately from the +/// key/value bytes. 
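For illustration (not part of the patch): the syscall-layer TokenAmount defined above packs an attoFIL amount into two little-endian u64 halves, and the From/TryFrom impls in this file round-trip it against the bigint-backed econ::TokenAmount. A sketch, assuming the vendored crate paths:

use fvm_shared::econ::TokenAmount as EconAmount;
use fvm_shared::sys::TokenAmount as SysAmount;

fn main() {
    // 2^64 + 5 attoFIL needs both halves.
    let amount = EconAmount::from_atto(u128::from(u64::MAX) + 6);
    let raw = SysAmount::try_from(&amount).expect("non-negative and below 2^128, so it fits");

    // Copy the packed fields out first; references to fields of a #[repr(packed)] struct are not allowed.
    let (lo, hi) = (raw.lo, raw.hi);
    assert_eq!((hi, lo), (1, 5));

    // Lossless round-trip back into the bigint-backed type.
    assert_eq!(EconAmount::from(raw), amount);

    // Negative amounts (or anything >= 2^128) cannot cross the syscall boundary.
    assert!(SysAmount::try_from(&EconAmount::from_atto(-1)).is_err());
}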
+#[repr(C, packed)] +pub struct EventEntry { + pub flags: crate::event::Flags, + pub codec: u64, + pub key_len: u32, + pub val_len: u32, +} + +/// An unsafe trait to mark "syscall safe" types. These types must be safe to memcpy to and from +/// WASM. This means: +/// +/// 1. Repr C & packed alignment (no reordering, no padding). +/// 2. Copy, Sized, and no pointers. +/// 3. No floats (non-determinism). +/// +/// # Safety +/// +/// Incorrectly implementing this could lead to undefined behavior in types passed between wasm and +/// rust. +pub unsafe trait SyscallSafe: Copy + Sized + 'static {} + +macro_rules! assert_syscall_safe { + ($($t:ty,)*) => { + $(unsafe impl SyscallSafe for $t {})* + } +} + +assert_syscall_safe! { + (), + + u8, u16, u32, u64, + i8, i16, i32, i64, + + TokenAmount, + out::ipld::IpldOpen, + out::ipld::IpldStat, + out::send::Send, + out::crypto::VerifyConsensusFault, + out::network::NetworkContext, + out::vm::MessageContext, +} + +unsafe impl SyscallSafe for [T; N] where T: SyscallSafe {} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sys/out.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sys/out.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/src/sys/out.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/sys/out.rs diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/upgrade/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/upgrade/mod.rs new file mode 100644 index 000000000000..557f45abd85e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/upgrade/mod.rs @@ -0,0 +1,10 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use cid::Cid; +use fvm_ipld_encoding::tuple::*; + +#[derive(Clone, Debug, Copy, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct UpgradeInfo { + // the old code cid we are upgrading from + pub old_code_cid: Cid, +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/version/mod.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/version/mod.rs new file mode 100644 index 000000000000..140c854608ab --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/src/version/mod.rs @@ -0,0 +1,91 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Display; + +use serde::{Deserialize, Serialize}; + +/// Specifies the network version +#[derive(Debug, Eq, PartialEq, Clone, Copy, Ord, PartialOrd, Serialize, Deserialize)] +#[repr(transparent)] +#[serde(transparent)] +pub struct NetworkVersion(u32); + +impl NetworkVersion { + /// genesis (specs-actors v0.9.3) + pub const V0: Self = Self(0); + /// breeze (specs-actors v0.9.7) + pub const V1: Self = Self(1); + /// smoke (specs-actors v0.9.8) + pub const V2: Self = Self(2); + /// ignition (specs-actors v0.9.11) + pub const V3: Self = Self(3); + /// actors v2 (specs-actors v2.0.x) + pub const V4: Self = Self(4); + /// tape (increases max prove commit size by 10x) + pub const V5: Self = Self(5); + /// kumquat (specs-actors v2.2.0) + pub const V6: Self = Self(6); + /// calico (specs-actors v2.3.2) + pub const V7: Self = Self(7); + /// persian (post-2.3.2 behaviour transition) + pub const V8: Self = Self(8); + /// orange + pub const V9: Self = Self(9); + /// trust (specs-actors v3.0.x) + pub const V10: Self = Self(10); 
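A small sketch (not part of the patch) of NetworkVersion, which the proof-type selection above keys on: it is an ordered u32 newtype, so upgrade gates reduce to plain comparisons and arbitrary future versions can still be constructed:

use fvm_shared::version::NetworkVersion;

fn main() {
    // from_sector_size() above switches proof families at V7 with exactly this kind of check.
    assert!(NetworkVersion::V10 > NetworkVersion::V7);
    assert_eq!(u32::from(NetworkVersion::V8), 8);
    assert_eq!(NetworkVersion::from(99), NetworkVersion::new(99));
    assert_eq!(NetworkVersion::V9.to_string(), "9");
}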
+ /// norwegian (specs-actors v3.1.x) + pub const V11: Self = Self(11); + /// turbo (specs-actors v4.0.x) + pub const V12: Self = Self(12); + /// HyperDrive + pub const V13: Self = Self(13); + /// Chocolate v6 + pub const V14: Self = Self(14); + /// OhSnap v7 + pub const V15: Self = Self(15); + /// Skyr (builtin-actors v8) + pub const V16: Self = Self(16); + /// Shark (builtin-actors v9) + pub const V17: Self = Self(17); + /// Hygge (builtin-actors v10) + pub const V18: Self = Self(18); + /// Lightning (builtin-actors v11) + pub const V19: Self = Self(19); + /// Thunder (builtin-actors v11) + pub const V20: Self = Self(20); + /// Watermelon (builtin-actors v12) + pub const V21: Self = Self(21); + /// Dragon (builtin-actors v13) + pub const V22: Self = Self(22); + /// Waffle (builtin-actors v14) + pub const V23: Self = Self(23); + /// TBD (builtin-actors v15) + pub const V24: Self = Self(24); + + pub const MAX: Self = Self(u32::MAX); + + /// Construct a new arbitrary network version. + pub const fn new(v: u32) -> Self { + Self(v) + } +} + +impl Display for NetworkVersion { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for NetworkVersion { + fn from(v: u32) -> Self { + Self(v) + } +} + +impl From for u32 { + fn from(v: NetworkVersion) -> Self { + v.0 + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/tests/address_test.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/tests/address_test.rs similarity index 99% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/tests/address_test.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/tests/address_test.rs index 0ce8bc4929a8..6af767c13000 100644 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/tests/address_test.rs +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/tests/address_test.rs @@ -581,7 +581,7 @@ fn address_hashmap() { // insert other value let h2 = Address::new_id(2); - assert!(hm.get(&h2).is_none()); + assert!(!hm.contains_key(&h2)); hm.insert(h2, 2); assert_eq!(hm.get(&h2).unwrap(), &2); diff --git a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/tests/commcid_tests.rs b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/tests/commcid_tests.rs similarity index 97% rename from third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/tests/commcid_tests.rs rename to third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/tests/commcid_tests.rs index e43bebe7359e..dd80e9070c9d 100644 --- a/third_party/rust/chromium_crates_io/vendor/fvm_shared-3.10.0/tests/commcid_tests.rs +++ b/third_party/rust/chromium_crates_io/vendor/fvm_shared-4.5.1/tests/commcid_tests.rs @@ -2,9 +2,9 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use cid::multihash::{Code, Multihash, MultihashDigest}; use cid::Cid; use fvm_shared::commcid::*; +use multihash_codetable::{Code, Multihash, MultihashDigest}; use rand::{thread_rng, Rng}; fn rand_comm() -> Commitment { diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/.cargo-checksum.json diff --git 
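In the vendored `fvm_shared` 4.5.1, `NetworkVersion` is a transparent newtype over `u32` with derived ordering, a `Display` impl, and `From<u32>`/`From<NetworkVersion>` conversions, as shown in the new `version/mod.rs` above. A minimal usage sketch, not part of the diff, assuming the crate exposes the type at `fvm_shared::version` and with `supports_feature_x` as a purely hypothetical helper:

```rust
use fvm_shared::version::NetworkVersion;

// Hypothetical helper: PartialOrd/Ord are derived on the newtype, so
// version gating is a plain comparison against the named constants.
fn supports_feature_x(nv: NetworkVersion) -> bool {
    nv >= NetworkVersion::V21
}

fn main() {
    // Any u32 is accepted via From<u32>, so unknown future versions are representable.
    let nv = NetworkVersion::from(24u32);
    assert_eq!(u32::from(nv), 24);
    assert!(supports_feature_x(nv));
    // Display prints the raw number, e.g. "network version 24".
    println!("network version {nv}");
}
```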
a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..4f0c31404485 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "dd56b3b19713c893f4a0e2344dff8e74073b7089" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.gitignore b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/.gitignore similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.gitignore rename to third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/.gitignore diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/CHANGELOG.md new file mode 100644 index 000000000000..cdcc070ab1c0 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/CHANGELOG.md @@ -0,0 +1,580 @@ +# Change Log + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/) +and this project adheres to [Semantic Versioning](https://semver.org/). + +## [Unreleased] + +## [v0.15.1] - 2024-11-03 + +This release removes the `borsh` feature introduced in 0.15.0 because it was +found to be incorrectly implemented. Users should use the `hashbrown` feature of +the `borsh` crate instead which provides the same trait implementations. + +## ~~[v0.15.0] - 2024-10-01~~ + +This release was _yanked_ due to a broken implementation of the `borsh` feature. + +This update contains breaking changes that remove the `raw` API with the hope of +centralising on the `HashTable` API in the future. You can follow the discussion +and progress in #545 to discuss features you think should be added to this API +that were previously only possible on the `raw` API. + +### Added + +- Added `borsh` feature with `BorshSerialize` and `BorshDeserialize` impls. (#525) +- Added `Assign` impls for `HashSet` operators. (#529) +- Added `Default` impls for iterator types. (#542) +- Added `HashTable::iter_hash{,_mut}` methods. (#549) +- Added `Hash{Table,Map,Set}::allocation_size` methods. (#553) +- Implemented `Debug` and `FusedIterator` for all `HashTable` iterators. (#561) +- Specialized `Iterator::fold` for all `HashTable` iterators. (#561) + +### Changed + +- Changed `hash_set::VacantEntry::insert` to return `OccupiedEntry`. (#495) +- Improved`hash_set::Difference::size_hint` lower-bound. (#530) +- Improved `HashSet::is_disjoint` performance. (#531) +- `equivalent` feature is now enabled by default. (#532) +- `HashSet` operators now return a set with the same allocator. (#529) +- Changed the default hasher to foldhash. (#563) +- `ahash` feature has been renamed to `default-hasher`. (#533) +- Entry API has been reworked and several methods have been renamed. (#535) +- `Hash{Map,Set}::insert_unique_unchecked` is now unsafe. (#556) +- The signature of `get_many_mut` and related methods was changed. (#562) + +### Fixed + +* Fixed typos, stray backticks in docs. (#558, #560) + +### Removed + +- Raw entry API is now under `raw-entry` feature, to be eventually removed. (#534, #555) +- Raw table API has been made private and the `raw` feature is removed; + in the future, all code should be using the `HashTable` API instead. 
(#531, #546) +- `rykv` feature was removed; this is now provided by the `rykv` crate instead. (#554) +- `HashSet::get_or_insert_owned` was removed in favor of `get_or_insert_with`. (#555) + +## [v0.14.5] - 2024-04-28 + +### Fixed + +- Fixed index calculation in panic guard of `clone_from_impl`. (#511) + +## ~~[v0.14.4] - 2024-03-19~~ + +This release was _yanked_ due to a breaking change. + +## [v0.14.3] - 2023-11-26 + +### Added + +- Specialized `fold` implementation of iterators. (#480) + +### Fixed + +- Avoid using unstable `ptr::invalid_mut` on nightly. (#481) + +## [v0.14.2] - 2023-10-19 + +### Added + +- `HashTable` type which provides a low-level but safe API with explicit hashing. (#466) + +### Fixed + +- Disabled the use of NEON instructions on big-endian ARM. (#475) +- Disabled the use of NEON instructions on Miri. (#476) + +## [v0.14.1] - 2023-09-28 + +### Added + +- Allow serializing `HashMap`s that use a custom allocator. (#449) + +### Changed + +- Use the `Equivalent` trait from the `equivalent` crate. (#442) +- Slightly improved performance of table resizing. (#451) +- Relaxed MSRV to 1.63.0. (#457) +- Removed `Clone` requirement from custom allocators. (#468) + +### Fixed + +- Fixed custom allocators being leaked in some situations. (#439, #465) + +## [v0.14.0] - 2023-06-01 + +### Added + +- Support for `allocator-api2` crate + for interfacing with custom allocators on stable. (#417) +- Optimized implementation for ARM using NEON instructions. (#430) +- Support for rkyv serialization. (#432) +- `Equivalent` trait to look up values without `Borrow`. (#345) +- `Hash{Map,Set}::raw_table_mut` is added which returns a mutable reference. (#404) +- Fast path for `clear` on empty tables. (#428) + +### Changed + +- Optimized insertion to only perform a single lookup. (#277) +- `DrainFilter` (`drain_filter`) has been renamed to `ExtractIf` and no longer drops remaining + elements when the iterator is dropped. #(374) +- Bumped MSRV to 1.64.0. (#431) +- `{Map,Set}::raw_table` now returns an immutable reference. (#404) +- `VacantEntry` and `OccupiedEntry` now use the default hasher if none is + specified in generics. (#389) +- `RawTable::data_start` now returns a `NonNull` to match `RawTable::data_end`. (#387) +- `RawIter::{reflect_insert, reflect_remove}` are now unsafe. (#429) +- `RawTable::find_potential` is renamed to `find_or_find_insert_slot` and returns an `InsertSlot`. (#429) +- `RawTable::remove` now also returns an `InsertSlot`. (#429) +- `InsertSlot` can be used to insert an element with `RawTable::insert_in_slot`. (#429) +- `RawIterHash` no longer has a lifetime tied to that of the `RawTable`. (#427) +- The trait bounds of `HashSet::raw_table` have been relaxed to not require `Eq + Hash`. (#423) +- `EntryRef::and_replace_entry_with` and `OccupiedEntryRef::replace_entry_with` + were changed to give a `&K` instead of a `&Q` to the closure. + +### Removed + +- Support for `bumpalo` as an allocator with custom wrapper. + Use `allocator-api2` feature in `bumpalo` to use it as an allocator + for `hashbrown` collections. (#417) + +## [v0.13.2] - 2023-01-12 + +### Fixed + +- Added `#[inline(always)]` to `find_inner`. (#375) +- Fixed `RawTable::allocation_info` for empty tables. (#376) + +## [v0.13.1] - 2022-11-10 + +### Added + +- Added `Equivalent` trait to customize key lookups. (#350) +- Added support for 16-bit targets. (#368) +- Added `RawTable::allocation_info` which provides information about the memory + usage of a table. (#371) + +### Changed + +- Bumped MSRV to 1.61.0. 
+- Upgraded to `ahash` 0.8. (#357) +- Make `with_hasher_in` const. (#355) +- The following methods have been removed from the `RawTable` API in favor of + safer alternatives: + - `RawTable::erase_no_drop` => Use `RawTable::erase` or `RawTable::remove` instead. + - `Bucket::read` => Use `RawTable::remove` instead. + - `Bucket::drop` => Use `RawTable::erase` instead. + - `Bucket::write` => Use `Bucket::as_mut` instead. + +### Fixed + +- Ensure that `HashMap` allocations don't exceed `isize::MAX`. (#362) +- Fixed issue with field retagging in scopeguard. (#359) + +## [v0.12.3] - 2022-07-17 + +### Fixed + +- Fixed double-drop in `RawTable::clone_from`. (#348) + +## [v0.12.2] - 2022-07-09 + +### Added + +- Added `Entry` API for `HashSet`. (#342) +- Added `Extend<&'a (K, V)> for HashMap`. (#340) +- Added length-based short-circuiting for hash table iteration. (#338) +- Added a function to access the `RawTable` of a `HashMap`. (#335) + +### Changed + +- Edited `do_alloc` to reduce LLVM IR generated. (#341) + +## [v0.12.1] - 2022-05-02 + +### Fixed + +- Fixed underflow in `RawIterRange::size_hint`. (#325) +- Fixed the implementation of `Debug` for `ValuesMut` and `IntoValues`. (#325) + +## [v0.12.0] - 2022-01-17 + +### Added + +- Added `From<[T; N]>` and `From<[(K, V); N]>` for `HashSet` and `HashMap` respectively. (#297) +- Added an `allocator()` getter to HashMap and HashSet. (#257) +- Added `insert_unique_unchecked` to `HashMap` and `HashSet`. (#293) +- Added `into_keys` and `into_values` to HashMap. (#295) +- Implement `From` on `HashSet` and `HashMap`. (#298) +- Added `entry_ref` API to `HashMap`. (#201) + +### Changed + +- Bumped minimum Rust version to 1.56.1 and edition to 2021. +- Use u64 for the GroupWord on WebAssembly. (#271) +- Optimized `find`. (#279) +- Made rehashing and resizing less generic to reduce compilation time. (#282) +- Inlined small functions. (#283) +- Use `BuildHasher::hash_one` when `feature = "nightly"` is enabled. (#292) +- Relaxed the bounds on `Debug` for `HashSet`. (#296) +- Rename `get_each_mut` to `get_many_mut` and align API with the stdlib. (#291) +- Don't hash the key when searching in an empty table. (#305) + +### Fixed + +- Guard against allocations exceeding isize::MAX. (#268) +- Made `RawTable::insert_no_grow` unsafe. (#254) +- Inline `static_empty`. (#280) +- Fixed trait bounds on Send/Sync impls. (#303) + +## [v0.11.2] - 2021-03-25 + +### Fixed + +- Added missing allocator type parameter to `HashMap`'s and `HashSet`'s `Clone` impls. (#252) + +## [v0.11.1] - 2021-03-20 + +### Fixed + +- Added missing `pub` modifier to `BumpWrapper`. (#251) + +## [v0.11.0] - 2021-03-14 + +### Added +- Added safe `try_insert_no_grow` method to `RawTable`. (#229) +- Added support for `bumpalo` as an allocator without the `nightly` feature. (#231) +- Implemented `Default` for `RawTable`. (#237) +- Added new safe methods `RawTable::get_each_mut`, `HashMap::get_each_mut`, and + `HashMap::get_each_key_value_mut`. (#239) +- Added `From>` for `HashSet`. (#235) +- Added `try_insert` method to `HashMap`. (#247) + +### Changed +- The minimum Rust version has been bumped to 1.49.0. (#230) +- Significantly improved compilation times by reducing the amount of generated IR. (#205) + +### Removed +- We no longer re-export the unstable allocator items from the standard library, nor the stable shims approximating the same. (#227) +- Removed hasher specialization support from `aHash`, which was resulting in inconsistent hashes being generated for a key. 
(#248) + +### Fixed +- Fixed union length comparison. (#228) + +## ~~[v0.10.0] - 2021-01-16~~ + +This release was _yanked_ due to inconsistent hashes being generated with the `nightly` feature. (#248) + +### Changed +- Parametrized `RawTable`, `HashSet` and `HashMap` over an allocator. (#133) +- Improved branch prediction hints on stable. (#209) +- Optimized hashing of primitive types with AHash using specialization. (#207) +- Only instantiate `RawTable`'s reserve functions once per key-value. (#204) + +## [v0.9.1] - 2020-09-28 + +### Added +- Added safe methods to `RawTable` (#202): + - `get`: `find` and `as_ref` + - `get_mut`: `find` and `as_mut` + - `insert_entry`: `insert` and `as_mut` + - `remove_entry`: `find` and `remove` + - `erase_entry`: `find` and `erase` + +### Changed +- Removed `from_key_hashed_nocheck`'s `Q: Hash`. (#200) +- Made `RawTable::drain` safe. (#201) + +## [v0.9.0] - 2020-09-03 + +### Fixed +- `drain_filter` now removes and yields items that do match the predicate, + rather than items that don't. This is a **breaking change** to match the + behavior of the `drain_filter` methods in `std`. (#187) + +### Added +- Added `replace_entry_with` to `OccupiedEntry`, and `and_replace_entry_with` to `Entry`. (#190) +- Implemented `FusedIterator` and `size_hint` for `DrainFilter`. (#188) + +### Changed +- The minimum Rust version has been bumped to 1.36 (due to `crossbeam` dependency). (#193) +- Updated `ahash` dependency to 0.4. (#198) +- `HashMap::with_hasher` and `HashSet::with_hasher` are now `const fn`. (#195) +- Removed `T: Hash + Eq` and `S: BuildHasher` bounds on `HashSet::new`, + `with_capacity`, `with_hasher`, and `with_capacity_and_hasher`. (#185) + +## [v0.8.2] - 2020-08-08 + +### Changed +- Avoid closures to improve compile times. (#183) +- Do not iterate to drop if empty. (#182) + +## [v0.8.1] - 2020-07-16 + +### Added +- Added `erase` and `remove` to `RawTable`. (#171) +- Added `try_with_capacity` to `RawTable`. (#174) +- Added methods that allow re-using a `RawIter` for `RawDrain`, + `RawIntoIter`, and `RawParIter`. (#175) +- Added `reflect_remove` and `reflect_insert` to `RawIter`. (#175) +- Added a `drain_filter` function to `HashSet`. (#179) + +### Changed +- Deprecated `RawTable::erase_no_drop` in favor of `erase` and `remove`. (#176) +- `insert_no_grow` is now exposed under the `"raw"` feature. (#180) + +## [v0.8.0] - 2020-06-18 + +### Fixed +- Marked `RawTable::par_iter` as `unsafe`. (#157) + +### Changed +- Reduced the size of `HashMap`. (#159) +- No longer create tables with a capacity of 1 element. (#162) +- Removed `K: Eq + Hash` bounds on `retain`. (#163) +- Pulled in `HashMap` changes from rust-lang/rust (#164): + - `extend_one` support on nightly. + - `CollectionAllocErr` renamed to `TryReserveError`. + - Added `HashSet::get_or_insert_owned`. + - `Default` for `HashSet` no longer requires `T: Eq + Hash` and `S: BuildHasher`. + +## [v0.7.2] - 2020-04-27 + +### Added +- Added `or_insert_with_key` to `Entry`. (#152) + +### Fixed +- Partially reverted `Clone` optimization which was unsound. (#154) + +### Changed +- Disabled use of `const-random` by default, which prevented reproducible builds. (#155) +- Optimized `repeat` function. (#150) +- Use `NonNull` for buckets, which improves codegen for iterators. (#148) + +## [v0.7.1] - 2020-03-16 + +### Added +- Added `HashMap::get_key_value_mut`. (#145) + +### Changed +- Optimized `Clone` implementation. (#146) + +## [v0.7.0] - 2020-01-31 + +### Added +- Added a `drain_filter` function to `HashMap`. 
(#135) + +### Changed +- Updated `ahash` dependency to 0.3. (#141) +- Optimized set union and intersection. (#130) +- `raw_entry` can now be used without requiring `S: BuildHasher`. (#123) +- `RawTable::bucket_index` can now be used under the `raw` feature. (#128) + +## [v0.6.3] - 2019-10-31 + +### Added +- Added an `ahash-compile-time-rng` feature (enabled by default) which allows disabling the + `compile-time-rng` feature in `ahash` to work around a Cargo bug. (#125) + +## [v0.6.2] - 2019-10-23 + +### Added +- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between + runtime performance and compilation time. (#119) + +## [v0.6.1] - 2019-10-04 + +### Added +- Added `Entry::insert` and `RawEntryMut::insert`. (#118) + +### Changed +- `Group::static_empty` was changed from a `const` to a `static` (#116). + +## [v0.6.0] - 2019-08-13 + +### Fixed +- Fixed AHash accidentally depending on `std`. (#110) + +### Changed +- The minimum Rust version has been bumped to 1.32 (due to `rand` dependency). + +## ~~[v0.5.1] - 2019-08-04~~ + +This release was _yanked_ due to a breaking change for users of `no-default-features`. + +### Added +- The experimental and unsafe `RawTable` API is available under the "raw" feature. (#108) +- Added entry-like methods for `HashSet`. (#98) + +### Changed +- Changed the default hasher from FxHash to AHash. (#97) +- `hashbrown` is now fully `no_std` on recent Rust versions (1.36+). (#96) + +### Fixed +- We now avoid growing the table during insertions when it wasn't necessary. (#106) +- `RawOccupiedEntryMut` now properly implements `Send` and `Sync`. (#100) +- Relaxed `lazy_static` version. (#92) + +## [v0.5.0] - 2019-06-12 + +### Fixed +- Resize with a more conservative amount of space after deletions. (#86) + +### Changed +- Exposed the Layout of the failed allocation in CollectionAllocErr::AllocErr. (#89) + +## [v0.4.0] - 2019-05-30 + +### Fixed +- Fixed `Send` trait bounds on `IterMut` not matching the libstd one. (#82) + +## [v0.3.1] - 2019-05-30 + +### Fixed +- Fixed incorrect use of slice in unsafe code. (#80) + +## [v0.3.0] - 2019-04-23 + +### Changed +- Changed shrink_to to not panic if min_capacity < capacity. (#67) + +### Fixed +- Worked around emscripten bug emscripten-core/emscripten-fastcomp#258. (#66) + +## [v0.2.2] - 2019-04-16 + +### Fixed +- Inlined non-nightly lowest_set_bit_nonzero. (#64) +- Fixed build on latest nightly. (#65) + +## [v0.2.1] - 2019-04-14 + +### Changed +- Use for_each in map Extend and FromIterator. (#58) +- Improved worst-case performance of HashSet.is_subset. (#61) + +### Fixed +- Removed incorrect debug_assert. (#60) + +## [v0.2.0] - 2019-03-31 + +### Changed +- The code has been updated to Rust 2018 edition. This means that the minimum + Rust version has been bumped to 1.31 (2018 edition). + +### Added +- Added `insert_with_hasher` to the raw_entry API to allow `K: !(Hash + Eq)`. (#54) +- Added support for using hashbrown as the hash table implementation in libstd. (#46) + +### Fixed +- Fixed cargo build with minimal-versions. (#45) +- Fixed `#[may_dangle]` attributes to match the libstd `HashMap`. (#46) +- ZST keys and values are now handled properly. (#46) + +## [v0.1.8] - 2019-01-14 + +### Added +- Rayon parallel iterator support (#37) +- `raw_entry` support (#31) +- `#[may_dangle]` on nightly (#31) +- `try_reserve` support (#31) + +### Fixed +- Fixed variance on `IterMut`. 
(#31) + +## [v0.1.7] - 2018-12-05 + +### Fixed +- Fixed non-SSE version of convert_special_to_empty_and_full_to_deleted. (#32) +- Fixed overflow in rehash_in_place. (#33) + +## [v0.1.6] - 2018-11-17 + +### Fixed +- Fixed compile error on nightly. (#29) + +## [v0.1.5] - 2018-11-08 + +### Fixed +- Fixed subtraction overflow in generic::Group::match_byte. (#28) + +## [v0.1.4] - 2018-11-04 + +### Fixed +- Fixed a bug in the `erase_no_drop` implementation. (#26) + +## [v0.1.3] - 2018-11-01 + +### Added +- Serde support. (#14) + +### Fixed +- Make the compiler inline functions more aggressively. (#20) + +## [v0.1.2] - 2018-10-31 + +### Fixed +- `clear` segfaults when called on an empty table. (#13) + +## [v0.1.1] - 2018-10-30 + +### Fixed +- `erase_no_drop` optimization not triggering in the SSE2 implementation. (#3) +- Missing `Send` and `Sync` for hash map and iterator types. (#7) +- Bug when inserting into a table smaller than the group width. (#5) + +## v0.1.0 - 2018-10-29 + +- Initial release + +[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.15.1...HEAD +[v0.15.1]: https://github.com/rust-lang/hashbrown/compare/v0.15.0...v0.15.1 +[v0.15.0]: https://github.com/rust-lang/hashbrown/compare/v0.14.5...v0.15.0 +[v0.14.5]: https://github.com/rust-lang/hashbrown/compare/v0.14.4...v0.14.5 +[v0.14.4]: https://github.com/rust-lang/hashbrown/compare/v0.14.3...v0.14.4 +[v0.14.3]: https://github.com/rust-lang/hashbrown/compare/v0.14.2...v0.14.3 +[v0.14.2]: https://github.com/rust-lang/hashbrown/compare/v0.14.1...v0.14.2 +[v0.14.1]: https://github.com/rust-lang/hashbrown/compare/v0.14.0...v0.14.1 +[v0.14.0]: https://github.com/rust-lang/hashbrown/compare/v0.13.2...v0.14.0 +[v0.13.2]: https://github.com/rust-lang/hashbrown/compare/v0.13.1...v0.13.2 +[v0.13.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...v0.13.1 +[v0.12.3]: https://github.com/rust-lang/hashbrown/compare/v0.12.2...v0.12.3 +[v0.12.2]: https://github.com/rust-lang/hashbrown/compare/v0.12.1...v0.12.2 +[v0.12.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.0...v0.12.1 +[v0.12.0]: https://github.com/rust-lang/hashbrown/compare/v0.11.2...v0.12.0 +[v0.11.2]: https://github.com/rust-lang/hashbrown/compare/v0.11.1...v0.11.2 +[v0.11.1]: https://github.com/rust-lang/hashbrown/compare/v0.11.0...v0.11.1 +[v0.11.0]: https://github.com/rust-lang/hashbrown/compare/v0.10.0...v0.11.0 +[v0.10.0]: https://github.com/rust-lang/hashbrown/compare/v0.9.1...v0.10.0 +[v0.9.1]: https://github.com/rust-lang/hashbrown/compare/v0.9.0...v0.9.1 +[v0.9.0]: https://github.com/rust-lang/hashbrown/compare/v0.8.2...v0.9.0 +[v0.8.2]: https://github.com/rust-lang/hashbrown/compare/v0.8.1...v0.8.2 +[v0.8.1]: https://github.com/rust-lang/hashbrown/compare/v0.8.0...v0.8.1 +[v0.8.0]: https://github.com/rust-lang/hashbrown/compare/v0.7.2...v0.8.0 +[v0.7.2]: https://github.com/rust-lang/hashbrown/compare/v0.7.1...v0.7.2 +[v0.7.1]: https://github.com/rust-lang/hashbrown/compare/v0.7.0...v0.7.1 +[v0.7.0]: https://github.com/rust-lang/hashbrown/compare/v0.6.3...v0.7.0 +[v0.6.3]: https://github.com/rust-lang/hashbrown/compare/v0.6.2...v0.6.3 +[v0.6.2]: https://github.com/rust-lang/hashbrown/compare/v0.6.1...v0.6.2 +[v0.6.1]: https://github.com/rust-lang/hashbrown/compare/v0.6.0...v0.6.1 +[v0.6.0]: https://github.com/rust-lang/hashbrown/compare/v0.5.1...v0.6.0 +[v0.5.1]: https://github.com/rust-lang/hashbrown/compare/v0.5.0...v0.5.1 +[v0.5.0]: https://github.com/rust-lang/hashbrown/compare/v0.4.0...v0.5.0 +[v0.4.0]: 
https://github.com/rust-lang/hashbrown/compare/v0.3.1...v0.4.0 +[v0.3.1]: https://github.com/rust-lang/hashbrown/compare/v0.3.0...v0.3.1 +[v0.3.0]: https://github.com/rust-lang/hashbrown/compare/v0.2.2...v0.3.0 +[v0.2.2]: https://github.com/rust-lang/hashbrown/compare/v0.2.1...v0.2.2 +[v0.2.1]: https://github.com/rust-lang/hashbrown/compare/v0.2.0...v0.2.1 +[v0.2.0]: https://github.com/rust-lang/hashbrown/compare/v0.1.8...v0.2.0 +[v0.1.8]: https://github.com/rust-lang/hashbrown/compare/v0.1.7...v0.1.8 +[v0.1.7]: https://github.com/rust-lang/hashbrown/compare/v0.1.6...v0.1.7 +[v0.1.6]: https://github.com/rust-lang/hashbrown/compare/v0.1.5...v0.1.6 +[v0.1.5]: https://github.com/rust-lang/hashbrown/compare/v0.1.4...v0.1.5 +[v0.1.4]: https://github.com/rust-lang/hashbrown/compare/v0.1.3...v0.1.4 +[v0.1.3]: https://github.com/rust-lang/hashbrown/compare/v0.1.2...v0.1.3 +[v0.1.2]: https://github.com/rust-lang/hashbrown/compare/v0.1.1...v0.1.2 +[v0.1.1]: https://github.com/rust-lang/hashbrown/compare/v0.1.0...v0.1.1 diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/Cargo.toml new file mode 100644 index 000000000000..fa3587850303 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/Cargo.toml @@ -0,0 +1,172 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
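The vendored manifest below enables hashbrown's `default-hasher` feature (foldhash) by default, which trades HashDoS resistance for speed. As a hedged sketch of what that means for a consumer, not taken from the vendored files, the standard library's `RandomState` can be supplied explicitly when SipHash behaviour is wanted:

```rust
use hashbrown::HashMap;
use std::collections::hash_map::RandomState;

fn main() {
    // Default construction uses hashbrown's default hasher (foldhash as of 0.15).
    let mut fast: HashMap<u64, &str> = HashMap::new();
    fast.insert(1, "one");

    // Plugging in std's RandomState restores SipHash-based, HashDoS-resistant
    // hashing at some performance cost.
    let mut resistant: HashMap<u64, &str, RandomState> =
        HashMap::with_hasher(RandomState::new());
    resistant.insert(2, "two");

    assert_eq!(fast.get(&1), Some(&"one"));
    assert_eq!(resistant.get(&2), Some(&"two"));
}
```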
+ +[package] +edition = "2021" +rust-version = "1.65.0" +name = "hashbrown" +version = "0.15.1" +authors = ["Amanieu d'Antras "] +build = false +exclude = [ + ".github", + "/ci/*", +] +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A Rust port of Google's SwissTable hash map" +readme = "README.md" +keywords = [ + "hash", + "no_std", + "hashmap", + "swisstable", +] +categories = [ + "data-structures", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/hashbrown" + +[package.metadata.docs.rs] +features = [ + "nightly", + "rayon", + "serde", + "raw-entry", +] +rustdoc-args = ["--generate-link-to-definition"] + +[lib] +name = "hashbrown" +path = "src/lib.rs" + +[[test]] +name = "equivalent_trait" +path = "tests/equivalent_trait.rs" + +[[test]] +name = "hasher" +path = "tests/hasher.rs" + +[[test]] +name = "rayon" +path = "tests/rayon.rs" + +[[test]] +name = "serde" +path = "tests/serde.rs" + +[[test]] +name = "set" +path = "tests/set.rs" + +[[bench]] +name = "bench" +path = "benches/bench.rs" + +[[bench]] +name = "insert_unique_unchecked" +path = "benches/insert_unique_unchecked.rs" + +[[bench]] +name = "set_ops" +path = "benches/set_ops.rs" + +[dependencies.alloc] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-alloc" + +[dependencies.allocator-api2] +version = "0.2.9" +features = ["alloc"] +optional = true +default-features = false + +[dependencies.compiler_builtins] +version = "0.1.2" +optional = true + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" + +[dependencies.equivalent] +version = "1.0" +optional = true +default-features = false + +[dependencies.foldhash] +version = "0.1.2" +optional = true +default-features = false + +[dependencies.rayon] +version = "1.2" +optional = true + +[dependencies.serde] +version = "1.0.25" +optional = true +default-features = false + +[dev-dependencies.bumpalo] +version = "3.13.0" +features = ["allocator-api2"] + +[dev-dependencies.doc-comment] +version = "0.3.1" + +[dev-dependencies.fnv] +version = "1.0.7" + +[dev-dependencies.lazy_static] +version = "1.4" + +[dev-dependencies.rand] +version = "0.8.3" +features = ["small_rng"] + +[dev-dependencies.rayon] +version = "1.2" + +[dev-dependencies.serde_test] +version = "1.0" + +[features] +default = [ + "default-hasher", + "inline-more", + "allocator-api2", + "equivalent", + "raw-entry", +] +default-hasher = ["dep:foldhash"] +inline-more = [] +nightly = [ + "allocator-api2?/nightly", + "bumpalo/allocator_api", +] +raw-entry = [] +rustc-dep-of-std = [ + "nightly", + "core", + "compiler_builtins", + "alloc", + "rustc-internal-api", + "raw-entry", +] +rustc-internal-api = [] diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/Cargo.toml.orig new file mode 100644 index 000000000000..572f5f9a55a1 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/Cargo.toml.orig @@ -0,0 +1,80 @@ +[package] +name = "hashbrown" +version = "0.15.1" +authors = ["Amanieu d'Antras "] +description = "A Rust port of Google's SwissTable hash map" +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/hashbrown" +readme = "README.md" +keywords = ["hash", "no_std", "hashmap", "swisstable"] +categories = ["data-structures", "no-std"] +exclude = [".github", "/ci/*"] +edition = "2021" +rust-version = "1.65.0" + +[dependencies] +# For the default 
hasher +foldhash = { version = "0.1.2", default-features = false, optional = true } + +# For external trait impls +rayon = { version = "1.2", optional = true } +serde = { version = "1.0.25", default-features = false, optional = true } + +# When built as part of libstd +core = { version = "1.0.0", optional = true, package = "rustc-std-workspace-core" } +compiler_builtins = { version = "0.1.2", optional = true } +alloc = { version = "1.0.0", optional = true, package = "rustc-std-workspace-alloc" } + +# Support for allocators that use allocator-api2 +allocator-api2 = { version = "0.2.9", optional = true, default-features = false, features = [ + "alloc", +] } + +# Equivalent trait which can be shared with other hash table implementations. +equivalent = { version = "1.0", optional = true, default-features = false } + +[dev-dependencies] +lazy_static = "1.4" +rand = { version = "0.8.3", features = ["small_rng"] } +rayon = "1.2" +fnv = "1.0.7" +serde_test = "1.0" +doc-comment = "0.3.1" +bumpalo = { version = "3.13.0", features = ["allocator-api2"] } + +[features] +default = ["default-hasher", "inline-more", "allocator-api2", "equivalent", "raw-entry"] + +# Enables use of nightly features. This is only guaranteed to work on the latest +# version of nightly Rust. +nightly = ["allocator-api2?/nightly", "bumpalo/allocator_api"] + +# Enables the RustcEntry API used to provide the standard library's Entry API. +rustc-internal-api = [] + +# Internal feature used when building as part of the standard library. +rustc-dep-of-std = [ + "nightly", + "core", + "compiler_builtins", + "alloc", + "rustc-internal-api", + "raw-entry", +] + +# Enables the deprecated RawEntry API. +raw-entry = [] + +# Provides a default hasher. Currently this is foldhash but this is subject to +# change in the future. Note that the default hasher does *not* provide HashDoS +# resistance, unlike the one in the standard library. +default-hasher = ["dep:foldhash"] + +# Enables usage of `#[inline]` on far more functions than by default in this +# crate. This may lead to a performance increase but often comes at a compile +# time cost. 
+inline-more = [] + +[package.metadata.docs.rs] +features = ["nightly", "rayon", "serde", "raw-entry"] +rustdoc-args = ["--generate-link-to-definition"] diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/LICENSE-APACHE similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/LICENSE-APACHE rename to third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/LICENSE-APACHE diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/LICENSE-MIT new file mode 100644 index 000000000000..5afc2a7b0aca --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Amanieu d'Antras + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/README.md b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/README.md new file mode 100644 index 000000000000..cb6dc26951f5 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/README.md @@ -0,0 +1,80 @@ +hashbrown +========= + +[![Build Status](https://github.com/rust-lang/hashbrown/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/hashbrown/actions) +[![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown) +[![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown) +[![Rust](https://img.shields.io/badge/rust-1.63.0%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) + +This crate is a Rust port of Google's high-performance [SwissTable] hash +map, adapted to make it a drop-in replacement for Rust's standard `HashMap` +and `HashSet` types. + +The original C++ version of SwissTable can be found [here], and this +[CppCon talk] gives an overview of how the algorithm works. + +Since Rust 1.36, this is now the `HashMap` implementation for the Rust standard +library. However you may still want to use this crate instead since it works +in environments without `std`, such as embedded systems and kernels. 
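As a quick illustration of the no_std claim above (a sketch under assumptions: the crate's default features are enabled and the embedding crate registers a global allocator), hashbrown needs only `alloc`:

```rust
#![no_std]
// Sketch of a no_std crate using hashbrown with its default features.
// A heap is still required: the embedding environment must provide a
// #[global_allocator] (e.g. from its kernel or HAL).

use hashbrown::HashSet;

/// Counts distinct values without touching `std`.
pub fn count_distinct(values: &[u32]) -> usize {
    let mut seen: HashSet<u32> = HashSet::new();
    for &v in values {
        seen.insert(v);
    }
    seen.len()
}
```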
+ +[SwissTable]: https://abseil.io/blog/20180927-swisstables +[here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h +[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 + +## [Change log](CHANGELOG.md) + +## Features + +- Drop-in replacement for the standard library `HashMap` and `HashSet` types. +- Uses [foldhash](https://github.com/orlp/foldhash) as the default hasher, which is much faster than SipHash. + However, foldhash does *not provide the same level of HashDoS resistance* as SipHash, so if that is important to you, you might want to consider using a different hasher. +- Around 2x faster than the previous standard library `HashMap`. +- Lower memory usage: only 1 byte of overhead per entry instead of 8. +- Compatible with `#[no_std]` (but requires a global allocator with the `alloc` crate). +- Empty hash maps do not allocate any memory. +- SIMD lookups to scan multiple hash entries in parallel. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +hashbrown = "0.15" +``` + +Then: + +```rust +use hashbrown::HashMap; + +let mut map = HashMap::new(); +map.insert(1, "one"); +``` +## Flags +This crate has the following Cargo features: + +- `nightly`: Enables nightly-only features including: `#[may_dangle]`. +- `serde`: Enables serde serialization support. +- `rayon`: Enables rayon parallel iterator support. +- `equivalent`: Allows comparisons to be customized with the `Equivalent` trait. (enabled by default) +- `raw-entry`: Enables access to the deprecated `RawEntry` API. +- `inline-more`: Adds inline hints to most functions, improving run-time performance at the cost + of compilation time. (enabled by default) +- `default-hasher`: Compiles with foldhash as default hasher. (enabled by default) +- `allocator-api2`: Enables support for allocators that support `allocator-api2`. (enabled by default) + +## License + +Licensed under either of: + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +additional terms or conditions. diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/bench.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/bench.rs new file mode 100644 index 000000000000..dd55159dc692 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/bench.rs @@ -0,0 +1,331 @@ +// This benchmark suite contains some benchmarks along a set of dimensions: +// Hasher: std default (SipHash) and crate default (foldhash). +// Int key distribution: low bit heavy, top bit heavy, and random. +// Task: basic functionality: insert, insert_erase, lookup, lookup_fail, iter +#![feature(test)] + +extern crate test; + +use test::{black_box, Bencher}; + +use hashbrown::DefaultHashBuilder; +use hashbrown::{HashMap, HashSet}; +use std::{ + collections::hash_map::RandomState, + sync::atomic::{self, AtomicUsize}, +}; + +const SIZE: usize = 1000; + +// The default hashmap when using this crate directly. +type FoldHashMap = HashMap; +// This uses the hashmap from this crate with the default hasher of the stdlib. +type StdHashMap = HashMap; + +// A random key iterator. 
+#[derive(Clone, Copy)] +struct RandomKeys { + state: usize, +} + +impl RandomKeys { + fn new() -> Self { + RandomKeys { state: 0 } + } +} + +impl Iterator for RandomKeys { + type Item = usize; + fn next(&mut self) -> Option { + // Add 1 then multiply by some 32 bit prime. + self.state = self.state.wrapping_add(1).wrapping_mul(3_787_392_781); + Some(self.state) + } +} + +// Just an arbitrary side effect to make the maps not shortcircuit to the non-dropping path +// when dropping maps/entries (most real world usages likely have drop in the key or value) +lazy_static::lazy_static! { + static ref SIDE_EFFECT: AtomicUsize = AtomicUsize::new(0); +} + +#[derive(Clone)] +struct DropType(usize); +impl Drop for DropType { + fn drop(&mut self) { + SIDE_EFFECT.fetch_add(self.0, atomic::Ordering::SeqCst); + } +} + +macro_rules! bench_suite { + ($bench_macro:ident, $bench_foldhash_serial:ident, $bench_std_serial:ident, + $bench_foldhash_highbits:ident, $bench_std_highbits:ident, + $bench_foldhash_random:ident, $bench_std_random:ident) => { + $bench_macro!($bench_foldhash_serial, FoldHashMap, 0..); + $bench_macro!($bench_std_serial, StdHashMap, 0..); + $bench_macro!( + $bench_foldhash_highbits, + FoldHashMap, + (0..).map(usize::swap_bytes) + ); + $bench_macro!( + $bench_std_highbits, + StdHashMap, + (0..).map(usize::swap_bytes) + ); + $bench_macro!($bench_foldhash_random, FoldHashMap, RandomKeys::new()); + $bench_macro!($bench_std_random, StdHashMap, RandomKeys::new()); + }; +} + +macro_rules! bench_insert { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::with_capacity_and_hasher(SIZE, Default::default()); + b.iter(|| { + m.clear(); + for i in ($keydist).take(SIZE) { + m.insert(i, (DropType(i), [i; 20])); + } + black_box(&mut m); + }); + eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); + } + }; +} + +bench_suite!( + bench_insert, + insert_foldhash_serial, + insert_std_serial, + insert_foldhash_highbits, + insert_std_highbits, + insert_foldhash_random, + insert_std_random +); + +macro_rules! bench_grow_insert { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + b.iter(|| { + let mut m = $maptype::default(); + for i in ($keydist).take(SIZE) { + m.insert(i, DropType(i)); + } + black_box(&mut m); + }) + } + }; +} + +bench_suite!( + bench_grow_insert, + grow_insert_foldhash_serial, + grow_insert_std_serial, + grow_insert_foldhash_highbits, + grow_insert_std_highbits, + grow_insert_foldhash_random, + grow_insert_std_random +); + +macro_rules! bench_insert_erase { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut base = $maptype::default(); + for i in ($keydist).take(SIZE) { + base.insert(i, DropType(i)); + } + let skip = $keydist.skip(SIZE); + b.iter(|| { + let mut m = base.clone(); + let mut add_iter = skip.clone(); + let mut remove_iter = $keydist; + // While keeping the size constant, + // replace the first keydist with the second. + for (add, remove) in (&mut add_iter).zip(&mut remove_iter).take(SIZE) { + m.insert(add, DropType(add)); + black_box(m.remove(&remove)); + } + black_box(m); + }); + eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); + } + }; +} + +bench_suite!( + bench_insert_erase, + insert_erase_foldhash_serial, + insert_erase_std_serial, + insert_erase_foldhash_highbits, + insert_erase_std_highbits, + insert_erase_foldhash_random, + insert_erase_std_random +); + +macro_rules! 
bench_lookup { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + for i in $keydist.take(SIZE) { + m.insert(i, DropType(i)); + } + + b.iter(|| { + for i in $keydist.take(SIZE) { + black_box(m.get(&i)); + } + }); + eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); + } + }; +} + +bench_suite!( + bench_lookup, + lookup_foldhash_serial, + lookup_std_serial, + lookup_foldhash_highbits, + lookup_std_highbits, + lookup_foldhash_random, + lookup_std_random +); + +macro_rules! bench_lookup_fail { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + let mut iter = $keydist; + for i in (&mut iter).take(SIZE) { + m.insert(i, DropType(i)); + } + + b.iter(|| { + for i in (&mut iter).take(SIZE) { + black_box(m.get(&i)); + } + }) + } + }; +} + +bench_suite!( + bench_lookup_fail, + lookup_fail_foldhash_serial, + lookup_fail_std_serial, + lookup_fail_foldhash_highbits, + lookup_fail_std_highbits, + lookup_fail_foldhash_random, + lookup_fail_std_random +); + +macro_rules! bench_iter { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + for i in ($keydist).take(SIZE) { + m.insert(i, DropType(i)); + } + + b.iter(|| { + for i in &m { + black_box(i); + } + }) + } + }; +} + +bench_suite!( + bench_iter, + iter_foldhash_serial, + iter_std_serial, + iter_foldhash_highbits, + iter_std_highbits, + iter_foldhash_random, + iter_std_random +); + +#[bench] +fn clone_small(b: &mut Bencher) { + let mut m = HashMap::new(); + for i in 0..10 { + m.insert(i, DropType(i)); + } + + b.iter(|| { + black_box(m.clone()); + }) +} + +#[bench] +fn clone_from_small(b: &mut Bencher) { + let mut m = HashMap::new(); + let mut m2 = HashMap::new(); + for i in 0..10 { + m.insert(i, DropType(i)); + } + + b.iter(|| { + m2.clone_from(&m); + black_box(&mut m2); + }) +} + +#[bench] +fn clone_large(b: &mut Bencher) { + let mut m = HashMap::new(); + for i in 0..1000 { + m.insert(i, DropType(i)); + } + + b.iter(|| { + black_box(m.clone()); + }) +} + +#[bench] +fn clone_from_large(b: &mut Bencher) { + let mut m = HashMap::new(); + let mut m2 = HashMap::new(); + for i in 0..1000 { + m.insert(i, DropType(i)); + } + + b.iter(|| { + m2.clone_from(&m); + black_box(&mut m2); + }) +} + +#[bench] +fn rehash_in_place(b: &mut Bencher) { + b.iter(|| { + let mut set = HashSet::new(); + + // Each loop triggers one rehash + for _ in 0..10 { + for i in 0..223 { + set.insert(i); + } + + assert_eq!( + set.capacity(), + 224, + "The set must be at or close to capacity to trigger a re hashing" + ); + + for i in 100..1400 { + set.remove(&(i - 100)); + set.insert(i); + } + set.clear(); + } + }); +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/insert_unique_unchecked.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/insert_unique_unchecked.rs new file mode 100644 index 000000000000..cfd69cdb7d01 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/insert_unique_unchecked.rs @@ -0,0 +1,34 @@ +//! Compare `insert` and `insert_unique_unchecked` operations performance. 
+ +#![feature(test)] + +extern crate test; + +use hashbrown::HashMap; +use test::Bencher; + +#[bench] +fn insert(b: &mut Bencher) { + let keys: Vec = (0..1000).map(|i| format!("xxxx{}yyyy", i)).collect(); + b.iter(|| { + let mut m = HashMap::with_capacity(1000); + for k in &keys { + m.insert(k, k); + } + m + }); +} + +#[bench] +fn insert_unique_unchecked(b: &mut Bencher) { + let keys: Vec = (0..1000).map(|i| format!("xxxx{}yyyy", i)).collect(); + b.iter(|| { + let mut m = HashMap::with_capacity(1000); + for k in &keys { + unsafe { + m.insert_unique_unchecked(k, k); + } + } + m + }); +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/set_ops.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/set_ops.rs new file mode 100644 index 000000000000..3b2ab5f28c29 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/benches/set_ops.rs @@ -0,0 +1,148 @@ +//! This file contains benchmarks for the ops traits implemented by HashSet. +//! Each test is intended to have a defined larger and smaller set, +//! but using a larger size for the "small" set works just as well. +//! +//! Each assigning test is done in the configuration that is faster. Cheating, I know. +//! The exception to this is Sub, because there the result differs. So I made two benchmarks for Sub. + +#![feature(test)] + +extern crate test; + +use hashbrown::HashSet; +use test::Bencher; + +/// The number of items to generate for the larger of the sets. +const LARGE_SET_SIZE: usize = 1000; + +/// The number of items to generate for the smaller of the sets. +const SMALL_SET_SIZE: usize = 100; + +/// The number of keys present in both sets. +const OVERLAP: usize = + [LARGE_SET_SIZE, SMALL_SET_SIZE][(LARGE_SET_SIZE < SMALL_SET_SIZE) as usize] / 2; + +/// Creates a set containing end - start unique string elements. 
+fn create_set(start: usize, end: usize) -> HashSet { + (start..end).map(|nr| format!("key{}", nr)).collect() +} + +#[bench] +fn set_ops_bit_or(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| &large_set | &small_set) +} + +#[bench] +fn set_ops_bit_and(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| &large_set & &small_set) +} + +#[bench] +fn set_ops_bit_xor(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| &large_set ^ &small_set) +} + +#[bench] +fn set_ops_sub_large_small(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| &large_set - &small_set) +} + +#[bench] +fn set_ops_sub_small_large(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| &small_set - &large_set) +} + +#[bench] +fn set_ops_bit_or_assign(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| { + let mut set = large_set.clone(); + set |= &small_set; + set + }); +} + +#[bench] +fn set_ops_bit_and_assign(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| { + let mut set = small_set.clone(); + set &= &large_set; + set + }); +} + +#[bench] +fn set_ops_bit_xor_assign(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| { + let mut set = large_set.clone(); + set ^= &small_set; + set + }); +} + +#[bench] +fn set_ops_sub_assign_large_small(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| { + let mut set = large_set.clone(); + set -= &small_set; + set + }); +} + +#[bench] +fn set_ops_sub_assign_small_large(b: &mut Bencher) { + let large_set = create_set(0, LARGE_SET_SIZE); + let small_set = create_set( + LARGE_SET_SIZE - OVERLAP, + LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP, + ); + b.iter(|| { + let mut set = small_set.clone(); + set -= &large_set; + set + }); +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/clippy.toml b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/clippy.toml new file mode 100644 index 000000000000..d98bf2c09b13 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/clippy.toml @@ -0,0 +1 @@ +doc-valid-idents = [ "CppCon", "SwissTable", "SipHash", "HashDoS" ] diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/mod.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/mod.rs new file mode 100644 index 000000000000..ef497836cb98 --- /dev/null +++ 
b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/mod.rs @@ -0,0 +1,4 @@ +#[cfg(feature = "rayon")] +pub(crate) mod rayon; +#[cfg(feature = "serde")] +mod serde; diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/helpers.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/helpers.rs new file mode 100644 index 000000000000..070b08cd56d7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/helpers.rs @@ -0,0 +1,27 @@ +use alloc::collections::LinkedList; +use alloc::vec::Vec; + +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +/// Helper for collecting parallel iterators to an intermediary +#[allow(clippy::linkedlist)] // yes, we need linked list here for efficient appending! +pub(super) fn collect(iter: I) -> (LinkedList>, usize) { + let list = iter + .into_par_iter() + .fold(Vec::new, |mut vec, elem| { + vec.push(elem); + vec + }) + .map(|vec| { + let mut list = LinkedList::new(); + list.push_back(vec); + list + }) + .reduce(LinkedList::new, |mut list1, mut list2| { + list1.append(&mut list2); + list1 + }); + + let len = list.iter().map(Vec::len).sum(); + (list, len) +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/map.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/map.rs new file mode 100644 index 000000000000..9623ca747c53 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/map.rs @@ -0,0 +1,721 @@ +//! Rayon extensions for `HashMap`. + +use super::raw::{RawIntoParIter, RawParDrain, RawParIter}; +use crate::hash_map::HashMap; +use crate::raw::{Allocator, Global}; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::marker::PhantomData; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; + +/// Parallel iterator over shared references to entries in a map. +/// +/// This iterator is created by the [`par_iter`] method on [`HashMap`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter`]: /hashbrown/struct.HashMap.html#method.par_iter +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html +pub struct ParIter<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { + type Item = (&'a K, &'a V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { + let r = x.as_ref(); + (&r.0, &r.1) + }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParIter<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for ParIter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = unsafe { self.inner.iter() }.map(|x| unsafe { + let r = x.as_ref(); + (&r.0, &r.1) + }); + f.debug_list().entries(iter).finish() + } +} + +/// Parallel iterator over shared references to keys in a map. 
+/// +/// This iterator is created by the [`par_keys`] method on [`HashMap`]. +/// See its documentation for more. +/// +/// [`par_keys`]: /hashbrown/struct.HashMap.html#method.par_keys +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParKeys<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> { + type Item = &'a K; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { &x.as_ref().0 }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParKeys<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for ParKeys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().0 }); + f.debug_list().entries(iter).finish() + } +} + +/// Parallel iterator over shared references to values in a map. +/// +/// This iterator is created by the [`par_values`] method on [`HashMap`]. +/// See its documentation for more. +/// +/// [`par_values`]: /hashbrown/struct.HashMap.html#method.par_values +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParValues<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> { + type Item = &'a V; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { &x.as_ref().1 }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParValues<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for ParValues<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().1 }); + f.debug_list().entries(iter).finish() + } +} + +/// Parallel iterator over mutable references to entries in a map. +/// +/// This iterator is created by the [`par_iter_mut`] method on [`HashMap`] +/// (provided by the [`IntoParallelRefMutIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter_mut`]: /hashbrown/struct.HashMap.html#method.par_iter_mut +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html +pub struct ParIterMut<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a mut V)>, +} + +impl<'a, K: Sync, V: Send> ParallelIterator for ParIterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { + let r = x.as_mut(); + (&r.0, &mut r.1) + }) + .drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParIterMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: self.inner.clone(), + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel iterator over mutable references to values in a map. +/// +/// This iterator is created by the [`par_values_mut`] method on [`HashMap`]. 
+/// See its documentation for more. +/// +/// [`par_values_mut`]: /hashbrown/struct.HashMap.html#method.par_values_mut +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParValuesMut<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a mut V)>, +} + +impl<'a, K: Sync, V: Send> ParallelIterator for ParValuesMut<'a, K, V> { + type Item = &'a mut V; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { &mut x.as_mut().1 }) + .drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParValuesMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParValues { + inner: self.inner.clone(), + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel iterator over entries of a consumed map. +/// +/// This iterator is created by the [`into_par_iter`] method on [`HashMap`] +/// (provided by the [`IntoParallelIterator`] trait). +/// See its documentation for more. +/// +/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html +pub struct IntoParIter { + inner: RawIntoParIter<(K, V), A>, +} + +impl ParallelIterator for IntoParIter { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +impl fmt::Debug for IntoParIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel draining iterator over entries of a map. +/// +/// This iterator is created by the [`par_drain`] method on [`HashMap`]. +/// See its documentation for more. +/// +/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParDrain<'a, K, V, A: Allocator = Global> { + inner: RawParDrain<'a, (K, V), A>, +} + +impl ParallelIterator for ParDrain<'_, K, V, A> { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParDrain<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +impl HashMap { + /// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_keys(&self) -> ParKeys<'_, K, V> { + ParKeys { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } + + /// Visits (potentially in parallel) immutably borrowed values in an arbitrary order. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_values(&self) -> ParValues<'_, K, V> { + ParValues { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } +} + +impl HashMap { + /// Visits (potentially in parallel) mutably borrowed values in an arbitrary order. 
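The `par_keys`/`par_values`/`par_values_mut` accessors defined here, together with the `IntoParallelIterator` impls for `&HashMap` and `&mut HashMap` further down in this file, are what expose rayon's prelude methods on `hashbrown::HashMap`. A minimal usage sketch, assuming a consumer crate that enables hashbrown's `rayon` feature (plus the default `default-hasher` feature) and depends on `rayon` directly:

use hashbrown::HashMap;
use rayon::prelude::*;

fn main() {
    let mut scores: HashMap<&str, u32> = HashMap::new();
    scores.insert("a", 1);
    scores.insert("b", 2);
    scores.insert("c", 3);

    // Borrowing parallel iteration yields (&K, &V) pairs in arbitrary order.
    let total: u32 = scores.par_iter().map(|(_, v)| *v).sum();
    assert_eq!(total, 6);

    // Key-only and value-only parallel views come straight from these impls.
    assert_eq!(scores.par_keys().count(), 3);
    assert_eq!(scores.par_values().copied().max(), Some(3));

    // Values can also be updated in place, in parallel.
    scores.par_values_mut().for_each(|v| *v *= 10);
    assert_eq!(scores["a"], 10);
}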
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { + ParValuesMut { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } + + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the map's allocated memory for reuse. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> ParDrain<'_, K, V, A> { + ParDrain { + inner: self.table.par_drain(), + } + } +} + +impl HashMap +where + K: Eq + Hash + Sync, + V: PartialEq + Sync, + S: BuildHasher + Sync, + A: Allocator + Sync, +{ + /// Returns `true` if the map is equal to another, + /// i.e. both maps contain the same keys mapped to the same values. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_eq(&self, other: &Self) -> bool { + self.len() == other.len() + && self + .into_par_iter() + .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + } +} + +impl IntoParallelIterator for HashMap { + type Item = (K, V); + type Iter = IntoParIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + inner: self.table.into_par_iter(), + } + } +} + +impl<'a, K: Sync, V: Sync, S, A: Allocator> IntoParallelIterator for &'a HashMap { + type Item = (&'a K, &'a V); + type Iter = ParIter<'a, K, V>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIter { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } +} + +impl<'a, K: Sync, V: Send, S, A: Allocator> IntoParallelIterator for &'a mut HashMap { + type Item = (&'a K, &'a mut V); + type Iter = ParIterMut<'a, K, V>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIterMut { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } +} + +/// Collect (key, value) pairs from a parallel iterator into a +/// hashmap. If multiple pairs correspond to the same key, then the +/// ones produced earlier in the parallel iterator will be +/// overwritten, just as with a sequential iterator. +impl FromParallelIterator<(K, V)> for HashMap +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher + Default, +{ + fn from_par_iter
<P>
(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + let mut map = HashMap::default(); + map.par_extend(par_iter); + map + } +} + +/// Extend a hash map with items from a parallel iterator. +impl ParallelExtend<(K, V)> for HashMap +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher, + A: Allocator, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +/// Extend a hash map with copied items from a parallel iterator. +impl<'a, K, V, S, A> ParallelExtend<(&'a K, &'a V)> for HashMap +where + K: Copy + Eq + Hash + Sync, + V: Copy + Sync, + S: BuildHasher, + A: Allocator, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +// This is equal to the normal `HashMap` -- no custom advantage. +fn extend(map: &mut HashMap, par_iter: I) +where + K: Eq + Hash, + S: BuildHasher, + I: IntoParallelIterator, + A: Allocator, + HashMap: Extend, +{ + let (list, len) = super::helpers::collect(par_iter); + + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire length if the map is empty. + // Otherwise reserve half the length (rounded up), so the map + // will only resize twice in the worst case. + let reserve = if map.is_empty() { len } else { (len + 1) / 2 }; + map.reserve(reserve); + for vec in list { + map.extend(vec); + } +} + +#[cfg(test)] +mod test_par_map { + use alloc::vec::Vec; + use core::hash::{Hash, Hasher}; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::hash_map::HashMap; + + struct Droppable<'a> { + k: usize, + counter: &'a AtomicUsize, + } + + impl Droppable<'_> { + fn new(k: usize, counter: &AtomicUsize) -> Droppable<'_> { + counter.fetch_add(1, Ordering::Relaxed); + + Droppable { k, counter } + } + } + + impl Drop for Droppable<'_> { + fn drop(&mut self) { + self.counter.fetch_sub(1, Ordering::Relaxed); + } + } + + impl Clone for Droppable<'_> { + fn clone(&self) -> Self { + Droppable::new(self.k, self.counter) + } + } + + impl Hash for Droppable<'_> { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + self.k.hash(state); + } + } + + impl PartialEq for Droppable<'_> { + fn eq(&self, other: &Self) -> bool { + self.k == other.k + } + } + + impl Eq for Droppable<'_> {} + + #[test] + fn test_into_iter_drops() { + let key = AtomicUsize::new(0); + let value = AtomicUsize::new(0); + + let hm = { + let mut hm = HashMap::new(); + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + + for i in 0..100 { + let d1 = Droppable::new(i, &key); + let d2 = Droppable::new(i + 100, &value); + hm.insert(d1, d2); + } + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. + drop(hm.clone()); + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // Ensure that dropping the iterator does not leak anything. 
+ drop(hm.clone().into_par_iter()); + + { + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // retain only half + let _v: Vec<_> = hm.into_par_iter().filter(|(key, _)| key.k < 50).collect(); + + assert_eq!(key.load(Ordering::Relaxed), 50); + assert_eq!(value.load(Ordering::Relaxed), 50); + }; + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + } + + #[test] + fn test_drain_drops() { + let key = AtomicUsize::new(0); + let value = AtomicUsize::new(0); + + let mut hm = { + let mut hm = HashMap::new(); + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + + for i in 0..100 { + let d1 = Droppable::new(i, &key); + let d2 = Droppable::new(i + 100, &value); + hm.insert(d1, d2); + } + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. + drop(hm.clone()); + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // Ensure that dropping the drain iterator does not leak anything. + drop(hm.clone().par_drain()); + + { + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // retain only half + let _v: Vec<_> = hm.drain().filter(|(key, _)| key.k < 50).collect(); + assert!(hm.is_empty()); + + assert_eq!(key.load(Ordering::Relaxed), 50); + assert_eq!(value.load(Ordering::Relaxed), 50); + }; + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + } + + #[test] + fn test_empty_iter() { + let mut m: HashMap = HashMap::new(); + assert_eq!(m.par_drain().count(), 0); + assert_eq!(m.par_keys().count(), 0); + assert_eq!(m.par_values().count(), 0); + assert_eq!(m.par_values_mut().count(), 0); + assert_eq!(m.par_iter().count(), 0); + assert_eq!(m.par_iter_mut().count(), 0); + assert_eq!(m.len(), 0); + assert!(m.is_empty()); + assert_eq!(m.into_par_iter().count(), 0); + } + + #[test] + fn test_iterate() { + let mut m = HashMap::with_capacity(4); + for i in 0..32 { + assert!(m.insert(i, i * 2).is_none()); + } + assert_eq!(m.len(), 32); + + let observed = AtomicUsize::new(0); + + m.par_iter().for_each(|(k, v)| { + assert_eq!(*v, *k * 2); + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_par_iter().collect(); + let keys: Vec<_> = map.par_keys().cloned().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn test_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_par_iter().collect(); + let values: Vec<_> = map.par_values().cloned().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn test_values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + let mut map: HashMap<_, _> = vec.into_par_iter().collect(); + map.par_values_mut().for_each(|value| *value *= 2); + let values: Vec<_> = map.par_values().cloned().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); + } + + #[test] + fn test_eq() { + let mut m1 = 
HashMap::new(); + m1.insert(1, 2); + m1.insert(2, 3); + m1.insert(3, 4); + + let mut m2 = HashMap::new(); + m2.insert(1, 2); + m2.insert(2, 3); + + assert!(!m1.par_eq(&m2)); + + m2.insert(3, 4); + + assert!(m1.par_eq(&m2)); + } + + #[test] + fn test_from_iter() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.par_iter().cloned().collect(); + + for &(k, v) in &xs { + assert_eq!(map.get(&k), Some(&v)); + } + } + + #[test] + fn test_extend_ref() { + let mut a = HashMap::new(); + a.insert(1, "one"); + let mut b = HashMap::new(); + b.insert(2, "two"); + b.insert(3, "three"); + + a.par_extend(&b); + + assert_eq!(a.len(), 3); + assert_eq!(a[&1], "one"); + assert_eq!(a[&2], "two"); + assert_eq!(a[&3], "three"); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/mod.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/mod.rs new file mode 100644 index 000000000000..61ca69b61d7f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/mod.rs @@ -0,0 +1,5 @@ +mod helpers; +pub(crate) mod map; +pub(crate) mod raw; +pub(crate) mod set; +pub(crate) mod table; diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/raw.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/raw.rs new file mode 100644 index 000000000000..612be47a55d1 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/raw.rs @@ -0,0 +1,230 @@ +use crate::raw::Bucket; +use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable}; +use crate::scopeguard::guard; +use core::marker::PhantomData; +use core::mem; +use core::ptr::NonNull; +use rayon::iter::{ + plumbing::{self, Folder, UnindexedConsumer, UnindexedProducer}, + ParallelIterator, +}; + +/// Parallel iterator which returns a raw pointer to every full bucket in the table. +pub struct RawParIter { + iter: RawIterRange, +} + +impl RawParIter { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) unsafe fn iter(&self) -> RawIterRange { + self.iter.clone() + } +} + +impl Clone for RawParIter { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + iter: self.iter.clone(), + } + } +} + +impl From> for RawParIter { + fn from(it: RawIter) -> Self { + RawParIter { iter: it.iter } + } +} + +impl ParallelIterator for RawParIter { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let producer = ParIterProducer { iter: self.iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +/// Producer which returns a `Bucket` for every element. +struct ParIterProducer { + iter: RawIterRange, +} + +impl UnindexedProducer for ParIterProducer { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn split(self) -> (Self, Option) { + let (left, right) = self.iter.split(); + let left = ParIterProducer { iter: left }; + let right = right.map(|right| ParIterProducer { iter: right }); + (left, right) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold_with(self, folder: F) -> F + where + F: Folder, + { + folder.consume_iter(self.iter) + } +} + +/// Parallel iterator which consumes a table and returns elements. 
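The `FromParallelIterator` and `ParallelExtend` impls above (exercised by `test_from_iter` and `test_extend_ref`) let a map be built or extended from a parallel iterator, and `par_eq` compares two maps in parallel. A small sketch under the same assumptions (hashbrown with the `rayon` feature, `rayon` in scope):

use hashbrown::HashMap;
use rayon::prelude::*;

fn main() {
    // Collect (key, value) pairs produced in parallel directly into a map.
    let squares: HashMap<u64, u64> = (0u64..1_000).into_par_iter().map(|i| (i, i * i)).collect();
    assert_eq!(squares.len(), 1_000);
    assert_eq!(squares[&7], 49);

    // Extend an existing map from a parallel iterator; the reserve heuristic in
    // extend() above keeps this to at most two resizes in the worst case.
    let mut more: HashMap<u64, u64> = HashMap::new();
    more.par_extend((1_000u64..1_010).into_par_iter().map(|i| (i, i * i)));
    assert_eq!(more.len(), 10);

    // Parallel content comparison.
    assert!(!squares.par_eq(&more));
}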
+pub struct RawIntoParIter { + table: RawTable, +} + +impl RawIntoParIter { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) unsafe fn par_iter(&self) -> RawParIter { + self.table.par_iter() + } +} + +impl ParallelIterator for RawIntoParIter { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let iter = unsafe { self.table.iter().iter }; + let _guard = guard(self.table.into_allocation(), |alloc| { + if let Some((ptr, layout, ref alloc)) = *alloc { + unsafe { + alloc.deallocate(ptr, layout); + } + } + }); + let producer = ParDrainProducer { iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +/// Parallel iterator which consumes elements without freeing the table storage. +pub struct RawParDrain<'a, T, A: Allocator = Global> { + // We don't use a &'a mut RawTable because we want RawParDrain to be + // covariant over T. + table: NonNull>, + marker: PhantomData<&'a RawTable>, +} + +unsafe impl Send for RawParDrain<'_, T, A> {} + +impl RawParDrain<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) unsafe fn par_iter(&self) -> RawParIter { + self.table.as_ref().par_iter() + } +} + +impl ParallelIterator for RawParDrain<'_, T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let _guard = guard(self.table, |table| unsafe { + table.as_mut().clear_no_drop(); + }); + let iter = unsafe { self.table.as_ref().iter().iter }; + mem::forget(self); + let producer = ParDrainProducer { iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +impl Drop for RawParDrain<'_, T, A> { + fn drop(&mut self) { + // If drive_unindexed is not called then simply clear the table. + unsafe { + self.table.as_mut().clear(); + } + } +} + +/// Producer which will consume all elements in the range, even if it is dropped +/// halfway through. +struct ParDrainProducer { + iter: RawIterRange, +} + +impl UnindexedProducer for ParDrainProducer { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn split(self) -> (Self, Option) { + let (left, right) = self.iter.clone().split(); + mem::forget(self); + let left = ParDrainProducer { iter: left }; + let right = right.map(|right| ParDrainProducer { iter: right }); + (left, right) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold_with(mut self, mut folder: F) -> F + where + F: Folder, + { + // Make sure to modify the iterator in-place so that any remaining + // elements are processed in our Drop impl. + for item in &mut self.iter { + folder = folder.consume(unsafe { item.read() }); + if folder.full() { + return folder; + } + } + + // If we processed all elements then we don't need to run the drop. + mem::forget(self); + folder + } +} + +impl Drop for ParDrainProducer { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + // Drop all remaining elements + if mem::needs_drop::() { + for item in &mut self.iter { + unsafe { + item.drop(); + } + } + } + } +} + +impl RawTable { + /// Returns a parallel iterator over the elements in a `RawTable`. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn par_iter(&self) -> RawParIter { + RawParIter { + iter: self.iter().iter, + } + } + + /// Returns a parallel iterator over the elements in a `RawTable`. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn into_par_iter(self) -> RawIntoParIter { + RawIntoParIter { table: self } + } + + /// Returns a parallel iterator which consumes all elements of a `RawTable` + /// without freeing its memory allocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> RawParDrain<'_, T, A> { + RawParDrain { + table: NonNull::from(self), + marker: PhantomData, + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/set.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/set.rs new file mode 100644 index 000000000000..3de98fccb89d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/set.rs @@ -0,0 +1,659 @@ +//! Rayon extensions for `HashSet`. + +use super::map; +use crate::hash_set::HashSet; +use crate::raw::{Allocator, Global}; +use core::hash::{BuildHasher, Hash}; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; + +/// Parallel iterator over elements of a consumed set. +/// +/// This iterator is created by the [`into_par_iter`] method on [`HashSet`] +/// (provided by the [`IntoParallelIterator`] trait). +/// See its documentation for more. +/// +/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter +/// [`HashSet`]: /hashbrown/struct.HashSet.html +/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html +pub struct IntoParIter { + inner: map::IntoParIter, +} + +impl ParallelIterator for IntoParIter { + type Item = T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.map(|(k, _)| k).drive_unindexed(consumer) + } +} + +/// Parallel draining iterator over entries of a set. +/// +/// This iterator is created by the [`par_drain`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParDrain<'a, T, A: Allocator = Global> { + inner: map::ParDrain<'a, T, (), A>, +} + +impl ParallelIterator for ParDrain<'_, T, A> { + type Item = T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.map(|(k, _)| k).drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in a set. +/// +/// This iterator is created by the [`par_iter`] method on [`HashSet`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter`]: /hashbrown/struct.HashSet.html#method.par_iter +/// [`HashSet`]: /hashbrown/struct.HashSet.html +/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html +pub struct ParIter<'a, T> { + inner: map::ParKeys<'a, T, ()>, +} + +impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the difference of +/// sets. +/// +/// This iterator is created by the [`par_difference`] method on [`HashSet`]. +/// See its documentation for more. 
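`RawParDrain` above is the machinery behind the user-facing `par_drain` methods: entries are handed out in parallel and the table's allocation is kept for reuse (the `Drop` impl clears whatever was not consumed). A rough sketch of the observable behaviour, assuming the `rayon` feature:

use hashbrown::HashMap;
use rayon::prelude::*;
use std::sync::atomic::{AtomicU64, Ordering};

fn main() {
    let mut map: HashMap<u32, u32> = (0u32..100).map(|i| (i, i)).collect();
    let capacity_before = map.capacity();

    // Drain every entry in parallel, summing the values.
    let sum = AtomicU64::new(0);
    map.par_drain().for_each(|(_, v)| {
        sum.fetch_add(u64::from(v), Ordering::Relaxed);
    });

    assert_eq!(sum.into_inner(), 4950);
    assert!(map.is_empty());
    // The allocation is preserved, so refilling avoids a fresh allocation.
    assert!(map.capacity() >= capacity_before);
}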
+/// +/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParDifference<'a, T, S, A: Allocator = Global> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .into_par_iter() + .filter(|&x| !self.b.contains(x)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the symmetric +/// difference of sets. +/// +/// This iterator is created by the [`par_symmetric_difference`] method on +/// [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParSymmetricDifference<'a, T, S, A: Allocator = Global> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .par_difference(self.b) + .chain(self.b.par_difference(self.a)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the intersection of +/// sets. +/// +/// This iterator is created by the [`par_intersection`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParIntersection<'a, T, S, A: Allocator = Global> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .into_par_iter() + .filter(|&x| self.b.contains(x)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the union of sets. +/// +/// This iterator is created by the [`par_union`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParUnion<'a, T, S, A: Allocator = Global> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + // We'll iterate one set in full, and only the remaining difference from the other. + // Use the smaller set for the difference in order to reduce hash lookups. + let (smaller, larger) = if self.a.len() <= self.b.len() { + (self.a, self.b) + } else { + (self.b, self.a) + }; + larger + .into_par_iter() + .chain(smaller.par_difference(larger)) + .drive_unindexed(consumer) + } +} + +impl HashSet +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Sync, +{ + /// Visits (potentially in parallel) the values representing the union, + /// i.e. 
all the values in `self` or `other`, without duplicates. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_union<'a>(&'a self, other: &'a Self) -> ParUnion<'a, T, S, A> { + ParUnion { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the difference, + /// i.e. the values that are in `self` but not in `other`. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_difference<'a>(&'a self, other: &'a Self) -> ParDifference<'a, T, S, A> { + ParDifference { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the symmetric + /// difference, i.e. the values that are in `self` or in `other` but not in both. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_symmetric_difference<'a>( + &'a self, + other: &'a Self, + ) -> ParSymmetricDifference<'a, T, S, A> { + ParSymmetricDifference { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the + /// intersection, i.e. the values that are both in `self` and `other`. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_intersection<'a>(&'a self, other: &'a Self) -> ParIntersection<'a, T, S, A> { + ParIntersection { a: self, b: other } + } + + /// Returns `true` if `self` has no elements in common with `other`. + /// This is equivalent to checking for an empty intersection. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_disjoint(&self, other: &Self) -> bool { + self.into_par_iter().all(|x| !other.contains(x)) + } + + /// Returns `true` if the set is a subset of another, + /// i.e. `other` contains at least all the values in `self`. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_subset(&self, other: &Self) -> bool { + if self.len() <= other.len() { + self.into_par_iter().all(|x| other.contains(x)) + } else { + false + } + } + + /// Returns `true` if the set is a superset of another, + /// i.e. `self` contains at least all the values in `other`. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_superset(&self, other: &Self) -> bool { + other.par_is_subset(self) + } + + /// Returns `true` if the set is equal to another, + /// i.e. both sets contain the same values. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_eq(&self, other: &Self) -> bool { + self.len() == other.len() && self.par_is_subset(other) + } +} + +impl HashSet +where + T: Eq + Hash + Send, + A: Allocator + Send, +{ + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the set's allocated memory for reuse. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> ParDrain<'_, T, A> { + ParDrain { + inner: self.map.par_drain(), + } + } +} + +impl IntoParallelIterator for HashSet { + type Item = T; + type Iter = IntoParIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + inner: self.map.into_par_iter(), + } + } +} + +impl<'a, T: Sync, S, A: Allocator> IntoParallelIterator for &'a HashSet { + type Item = &'a T; + type Iter = ParIter<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIter { + inner: self.map.par_keys(), + } + } +} + +/// Collect values from a parallel iterator into a hashset. +impl FromParallelIterator for HashSet +where + T: Eq + Hash + Send, + S: BuildHasher + Default, +{ + fn from_par_iter
<P>
(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + let mut set = HashSet::default(); + set.par_extend(par_iter); + set + } +} + +/// Extend a hash set with items from a parallel iterator. +impl ParallelExtend for HashSet +where + T: Eq + Hash + Send, + S: BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +/// Extend a hash set with copied items from a parallel iterator. +impl<'a, T, S> ParallelExtend<&'a T> for HashSet +where + T: 'a + Copy + Eq + Hash + Sync, + S: BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +// This is equal to the normal `HashSet` -- no custom advantage. +fn extend(set: &mut HashSet, par_iter: I) +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, + I: IntoParallelIterator, + HashSet: Extend, +{ + let (list, len) = super::helpers::collect(par_iter); + + // Values may be already present or show multiple times in the iterator. + // Reserve the entire length if the set is empty. + // Otherwise reserve half the length (rounded up), so the set + // will only resize twice in the worst case. + let reserve = if set.is_empty() { len } else { (len + 1) / 2 }; + set.reserve(reserve); + for vec in list { + set.extend(vec); + } +} + +#[cfg(test)] +mod test_par_set { + use alloc::vec::Vec; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::hash_set::HashSet; + + #[test] + fn test_disjoint() { + let mut xs = HashSet::new(); + let mut ys = HashSet::new(); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(xs.insert(5)); + assert!(ys.insert(11)); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(xs.insert(7)); + assert!(xs.insert(19)); + assert!(xs.insert(4)); + assert!(ys.insert(2)); + assert!(ys.insert(-11)); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(ys.insert(7)); + assert!(!xs.par_is_disjoint(&ys)); + assert!(!ys.par_is_disjoint(&xs)); + } + + #[test] + fn test_subset_and_superset() { + let mut a = HashSet::new(); + assert!(a.insert(0)); + assert!(a.insert(5)); + assert!(a.insert(11)); + assert!(a.insert(7)); + + let mut b = HashSet::new(); + assert!(b.insert(0)); + assert!(b.insert(7)); + assert!(b.insert(19)); + assert!(b.insert(250)); + assert!(b.insert(11)); + assert!(b.insert(200)); + + assert!(!a.par_is_subset(&b)); + assert!(!a.par_is_superset(&b)); + assert!(!b.par_is_subset(&a)); + assert!(!b.par_is_superset(&a)); + + assert!(b.insert(5)); + + assert!(a.par_is_subset(&b)); + assert!(!a.par_is_superset(&b)); + assert!(!b.par_is_subset(&a)); + assert!(b.par_is_superset(&a)); + } + + #[test] + fn test_iterate() { + let mut a = HashSet::new(); + for i in 0..32 { + assert!(a.insert(i)); + } + let observed = AtomicUsize::new(0); + a.par_iter().for_each(|k| { + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_intersection() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(11)); + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(77)); + assert!(a.insert(103)); + assert!(a.insert(5)); + assert!(a.insert(-5)); + + assert!(b.insert(2)); + assert!(b.insert(11)); + assert!(b.insert(77)); + assert!(b.insert(-9)); + assert!(b.insert(-42)); + assert!(b.insert(5)); + assert!(b.insert(3)); + + let expected = [3, 5, 11, 77]; + let i = a + 
.par_intersection(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(3)); + assert!(b.insert(9)); + + let expected = [1, 5, 11]; + let i = a + .par_difference(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_symmetric_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(-2)); + assert!(b.insert(3)); + assert!(b.insert(9)); + assert!(b.insert(14)); + assert!(b.insert(22)); + + let expected = [-2, 1, 5, 11, 14, 22]; + let i = a + .par_symmetric_difference(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_union() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + assert!(a.insert(16)); + assert!(a.insert(19)); + assert!(a.insert(24)); + + assert!(b.insert(-2)); + assert!(b.insert(1)); + assert!(b.insert(5)); + assert!(b.insert(9)); + assert!(b.insert(13)); + assert!(b.insert(19)); + + let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24]; + let i = a + .par_union(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_from_iter() { + let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9]; + + let set: HashSet<_> = xs.par_iter().cloned().collect(); + + for x in &xs { + assert!(set.contains(x)); + } + } + + #[test] + fn test_move_iter() { + let hs = { + let mut hs = HashSet::new(); + + hs.insert('a'); + hs.insert('b'); + + hs + }; + + let v = hs.into_par_iter().collect::>(); + assert!(v == ['a', 'b'] || v == ['b', 'a']); + } + + #[test] + fn test_eq() { + // These constants once happened to expose a bug in insert(). + // I'm keeping them around to prevent a regression. + let mut s1 = HashSet::new(); + + s1.insert(1); + s1.insert(2); + s1.insert(3); + + let mut s2 = HashSet::new(); + + s2.insert(1); + s2.insert(2); + + assert!(!s1.par_eq(&s2)); + + s2.insert(3); + + assert!(s1.par_eq(&s2)); + } + + #[test] + fn test_extend_ref() { + let mut a = HashSet::new(); + a.insert(1); + + a.par_extend(&[2, 3, 4][..]); + + assert_eq!(a.len(), 4); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + + let mut b = HashSet::new(); + b.insert(5); + b.insert(6); + + a.par_extend(&b); + + assert_eq!(a.len(), 6); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + assert!(a.contains(&5)); + assert!(a.contains(&6)); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/table.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/table.rs new file mode 100644 index 000000000000..cb04a03dfaa9 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/rayon/table.rs @@ -0,0 +1,249 @@ +//! Rayon extensions for `HashTable`. 
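Before the `HashTable` impls below, a usage sketch for the `HashSet` parallel set operations vendored above (`par_union`, `par_intersection`, `par_is_subset`, and friends), again assuming the `rayon` feature:

use hashbrown::HashSet;
use rayon::prelude::*;

fn main() {
    let a: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let b: HashSet<i32> = [3, 4, 5].into_iter().collect();

    // The set-algebra adapters are lazy parallel iterators over &T;
    // collect them to materialize a new set.
    let union: HashSet<i32> = a.par_union(&b).copied().collect();
    let inter: HashSet<i32> = a.par_intersection(&b).copied().collect();

    assert_eq!(union.len(), 5);
    assert_eq!(inter.len(), 2);
    assert!(inter.par_is_subset(&a) && inter.par_is_subset(&b));
    assert!(!a.par_is_disjoint(&b));
}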
+ +use super::raw::{RawIntoParIter, RawParDrain, RawParIter}; +use crate::hash_table::HashTable; +use crate::raw::{Allocator, Global}; +use core::fmt; +use core::marker::PhantomData; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +/// Parallel iterator over shared references to entries in a map. +/// +/// This iterator is created by the [`par_iter`] method on [`HashTable`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter`]: /hashbrown/struct.HashTable.html#method.par_iter +/// [`HashTable`]: /hashbrown/struct.HashTable.html +/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html +pub struct ParIter<'a, T> { + inner: RawParIter, + marker: PhantomData<&'a T>, +} + +impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { x.as_ref() }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParIter<'_, T> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for ParIter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = unsafe { self.inner.iter() }.map(|x| unsafe { x.as_ref() }); + f.debug_list().entries(iter).finish() + } +} + +/// Parallel iterator over mutable references to entries in a map. +/// +/// This iterator is created by the [`par_iter_mut`] method on [`HashTable`] +/// (provided by the [`IntoParallelRefMutIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter_mut`]: /hashbrown/struct.HashTable.html#method.par_iter_mut +/// [`HashTable`]: /hashbrown/struct.HashTable.html +/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html +pub struct ParIterMut<'a, T> { + inner: RawParIter, + marker: PhantomData<&'a mut T>, +} + +impl<'a, T: Send> ParallelIterator for ParIterMut<'a, T> { + type Item = &'a mut T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { x.as_mut() }) + .drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParIterMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: self.inner.clone(), + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel iterator over entries of a consumed map. +/// +/// This iterator is created by the [`into_par_iter`] method on [`HashTable`] +/// (provided by the [`IntoParallelIterator`] trait). +/// See its documentation for more. 
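Unlike `HashMap`/`HashSet`, `HashTable` stores bare values and leaves hashing to the caller, and these impls give it the same rayon integration. A hedged sketch (`DefaultHashBuilder` requires the default `default-hasher` feature; `hash_one` is std's `BuildHasher::hash_one`):

use hashbrown::{DefaultHashBuilder, HashTable};
use rayon::prelude::*;
use std::hash::BuildHasher;

fn main() {
    let hasher = DefaultHashBuilder::default();
    let mut table: HashTable<u32> = HashTable::new();
    for i in 0..8u32 {
        // The caller supplies the hash up front plus a re-hash closure for resizes.
        table.insert_unique(hasher.hash_one(&i), i, |v| hasher.hash_one(v));
    }

    let sum: u32 = table.par_iter().copied().sum();
    assert_eq!(sum, 28);
}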
+/// +/// [`into_par_iter`]: /hashbrown/struct.HashTable.html#method.into_par_iter +/// [`HashTable`]: /hashbrown/struct.HashTable.html +/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html +pub struct IntoParIter { + inner: RawIntoParIter, +} + +impl ParallelIterator for IntoParIter { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +impl fmt::Debug for IntoParIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel draining iterator over entries of a map. +/// +/// This iterator is created by the [`par_drain`] method on [`HashTable`]. +/// See its documentation for more. +/// +/// [`par_drain`]: /hashbrown/struct.HashTable.html#method.par_drain +/// [`HashTable`]: /hashbrown/struct.HashTable.html +pub struct ParDrain<'a, T, A: Allocator = Global> { + inner: RawParDrain<'a, T, A>, +} + +impl ParallelIterator for ParDrain<'_, T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParDrain<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +impl HashTable { + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the map's allocated memory for reuse. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> ParDrain<'_, T, A> { + ParDrain { + inner: self.raw.par_drain(), + } + } +} + +impl IntoParallelIterator for HashTable { + type Item = T; + type Iter = IntoParIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + inner: self.raw.into_par_iter(), + } + } +} + +impl<'a, T: Sync, A: Allocator> IntoParallelIterator for &'a HashTable { + type Item = &'a T; + type Iter = ParIter<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIter { + inner: unsafe { self.raw.par_iter() }, + marker: PhantomData, + } + } +} + +impl<'a, T: Send, A: Allocator> IntoParallelIterator for &'a mut HashTable { + type Item = &'a mut T; + type Iter = ParIterMut<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIterMut { + inner: unsafe { self.raw.par_iter() }, + marker: PhantomData, + } + } +} + +#[cfg(test)] +mod test_par_table { + use alloc::vec::Vec; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::{hash_map::make_hash, hash_table::HashTable, DefaultHashBuilder}; + + #[test] + fn test_iterate() { + let hasher = DefaultHashBuilder::default(); + let mut a = HashTable::new(); + for i in 0..32 { + a.insert_unique(make_hash(&hasher, &i), i, |x| make_hash(&hasher, x)); + } + let observed = AtomicUsize::new(0); + a.par_iter().for_each(|k| { + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_move_iter() { + let hasher = DefaultHashBuilder::default(); + let hs = { + let mut hs = HashTable::new(); + + hs.insert_unique(make_hash(&hasher, &'a'), 'a', |x| 
make_hash(&hasher, x)); + hs.insert_unique(make_hash(&hasher, &'b'), 'b', |x| make_hash(&hasher, x)); + + hs + }; + + let v = hs.into_par_iter().collect::>(); + assert!(v == ['a', 'b'] || v == ['b', 'a']); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/serde.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/serde.rs new file mode 100644 index 000000000000..0a76dbec25c4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/external_trait_impls/serde.rs @@ -0,0 +1,220 @@ +mod size_hint { + use core::cmp; + + /// This presumably exists to prevent denial of service attacks. + /// + /// Original discussion: https://github.com/serde-rs/serde/issues/1114. + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn cautious(hint: Option) -> usize { + cmp::min(hint.unwrap_or(0), 4096) + } +} + +mod map { + use crate::raw::Allocator; + use core::fmt; + use core::hash::{BuildHasher, Hash}; + use core::marker::PhantomData; + use serde::de::{Deserialize, Deserializer, MapAccess, Visitor}; + use serde::ser::{Serialize, Serializer}; + + use crate::hash_map::HashMap; + + use super::size_hint; + + impl Serialize for HashMap + where + K: Serialize + Eq + Hash, + V: Serialize, + H: BuildHasher, + A: Allocator, + { + #[cfg_attr(feature = "inline-more", inline)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_map(self) + } + } + + impl<'de, K, V, S, A> Deserialize<'de> for HashMap + where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: BuildHasher + Default, + A: Allocator + Default, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct MapVisitor + where + A: Allocator, + { + marker: PhantomData>, + } + + impl<'de, K, V, S, A> Visitor<'de> for MapVisitor + where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: BuildHasher + Default, + A: Allocator + Default, + { + type Value = HashMap; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a map") + } + + #[cfg_attr(feature = "inline-more", inline)] + fn visit_map(self, mut map: M) -> Result + where + M: MapAccess<'de>, + { + let mut values = HashMap::with_capacity_and_hasher_in( + size_hint::cautious(map.size_hint()), + S::default(), + A::default(), + ); + + while let Some((key, value)) = map.next_entry()? 
{ + values.insert(key, value); + } + + Ok(values) + } + } + + let visitor = MapVisitor { + marker: PhantomData, + }; + deserializer.deserialize_map(visitor) + } + } +} + +mod set { + use crate::raw::Allocator; + use core::fmt; + use core::hash::{BuildHasher, Hash}; + use core::marker::PhantomData; + use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; + use serde::ser::{Serialize, Serializer}; + + use crate::hash_set::HashSet; + + use super::size_hint; + + impl Serialize for HashSet + where + T: Serialize + Eq + Hash, + H: BuildHasher, + A: Allocator, + { + #[cfg_attr(feature = "inline-more", inline)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_seq(self) + } + } + + impl<'de, T, S, A> Deserialize<'de> for HashSet + where + T: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Default, + A: Allocator + Default, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct SeqVisitor + where + A: Allocator, + { + marker: PhantomData>, + } + + impl<'de, T, S, A> Visitor<'de> for SeqVisitor + where + T: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Default, + A: Allocator + Default, + { + type Value = HashSet; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a sequence") + } + + #[cfg_attr(feature = "inline-more", inline)] + fn visit_seq(self, mut seq: M) -> Result + where + M: SeqAccess<'de>, + { + let mut values = HashSet::with_capacity_and_hasher_in( + size_hint::cautious(seq.size_hint()), + S::default(), + A::default(), + ); + + while let Some(value) = seq.next_element()? { + values.insert(value); + } + + Ok(values) + } + } + + let visitor = SeqVisitor { + marker: PhantomData, + }; + deserializer.deserialize_seq(visitor) + } + + #[allow(clippy::missing_errors_doc)] + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + struct SeqInPlaceVisitor<'a, T, S, A>(&'a mut HashSet) + where + A: Allocator; + + impl<'a, 'de, T, S, A> Visitor<'de> for SeqInPlaceVisitor<'a, T, S, A> + where + T: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Default, + A: Allocator, + { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a sequence") + } + + #[cfg_attr(feature = "inline-more", inline)] + fn visit_seq(self, mut seq: M) -> Result + where + M: SeqAccess<'de>, + { + self.0.clear(); + self.0.reserve(size_hint::cautious(seq.size_hint())); + + while let Some(value) = seq.next_element()? { + self.0.insert(value); + } + + Ok(()) + } + } + + deserializer.deserialize_seq(SeqInPlaceVisitor(place)) + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/lib.rs new file mode 100644 index 000000000000..ba5d3c14504c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/lib.rs @@ -0,0 +1,175 @@ +//! This crate is a Rust port of Google's high-performance [SwissTable] hash +//! map, adapted to make it a drop-in replacement for Rust's standard `HashMap` +//! and `HashSet` types. +//! +//! The original C++ version of [SwissTable] can be found [here], and this +//! [CppCon talk] gives an overview of how the algorithm works. +//! +//! [SwissTable]: https://abseil.io/blog/20180927-swisstables +//! [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h +//! 
[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 + +#![no_std] +#![cfg_attr( + feature = "nightly", + feature( + test, + core_intrinsics, + dropck_eyepatch, + min_specialization, + extend_one, + allocator_api, + slice_ptr_get, + maybe_uninit_array_assume_init, + ) +)] +#![allow( + clippy::doc_markdown, + clippy::module_name_repetitions, + clippy::must_use_candidate, + clippy::option_if_let_else, + clippy::redundant_else, + clippy::manual_map, + clippy::missing_safety_doc, + clippy::missing_errors_doc +)] +#![warn(missing_docs)] +#![warn(rust_2018_idioms)] +#![cfg_attr(feature = "nightly", allow(internal_features))] + +/// Default hasher for [`HashMap`] and [`HashSet`]. +#[cfg(feature = "default-hasher")] +pub type DefaultHashBuilder = foldhash::fast::RandomState; + +/// Dummy default hasher for [`HashMap`] and [`HashSet`]. +#[cfg(not(feature = "default-hasher"))] +pub enum DefaultHashBuilder {} + +#[cfg(test)] +#[macro_use] +extern crate std; + +#[cfg_attr(test, macro_use)] +extern crate alloc; + +#[cfg(feature = "nightly")] +#[cfg(doctest)] +doc_comment::doctest!("../README.md"); + +#[macro_use] +mod macros; + +mod raw; + +mod external_trait_impls; +mod map; +#[cfg(feature = "raw-entry")] +mod raw_entry; +#[cfg(feature = "rustc-internal-api")] +mod rustc_entry; +mod scopeguard; +mod set; +mod table; + +pub mod hash_map { + //! A hash map implemented with quadratic probing and SIMD lookup. + pub use crate::map::*; + + #[cfg(feature = "rustc-internal-api")] + pub use crate::rustc_entry::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash maps. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::map::*; + } +} +pub mod hash_set { + //! A hash set implemented as a `HashMap` where the value is `()`. + pub use crate::set::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash sets. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::set::*; + } +} +pub mod hash_table { + //! A hash table implemented with quadratic probing and SIMD lookup. + pub use crate::table::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash tables. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::table::*; + } +} + +pub use crate::map::HashMap; +pub use crate::set::HashSet; +pub use crate::table::HashTable; + +#[cfg(feature = "equivalent")] +pub use equivalent::Equivalent; + +// This is only used as a fallback when building as part of `std`. +#[cfg(not(feature = "equivalent"))] +/// Key equivalence trait. +/// +/// This trait defines the function used to compare the input value with the +/// map keys (or set values) during a lookup operation such as [`HashMap::get`] +/// or [`HashSet::contains`]. +/// It is provided with a blanket implementation based on the +/// [`Borrow`](core::borrow::Borrow) trait. +/// +/// # Correctness +/// +/// Equivalent values must hash to the same value. +pub trait Equivalent { + /// Checks if this value is equivalent to the given key. 
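The `Equivalent` trait (either the `equivalent` crate's version or the fallback defined here) is what allows lookups with a borrowed form of the key, for example querying `String` keys with `&str` and avoiding a temporary allocation. A tiny sketch using only the default features:

use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<String, u32> = HashMap::new();
    map.insert("alpha".to_string(), 1);

    // &str is Equivalent<String> through the Borrow-based blanket impl,
    // so no owned String is needed for the lookup.
    assert_eq!(map.get("alpha"), Some(&1));
    assert!(map.contains_key("alpha"));
}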
+ /// + /// Returns `true` if both values are equivalent, and `false` otherwise. + /// + /// # Correctness + /// + /// When this function returns `true`, both `self` and `key` must hash to + /// the same value. + fn equivalent(&self, key: &K) -> bool; +} + +#[cfg(not(feature = "equivalent"))] +impl Equivalent for Q +where + Q: Eq, + K: core::borrow::Borrow, +{ + fn equivalent(&self, key: &K) -> bool { + self == key.borrow() + } +} + +/// The error type for `try_reserve` methods. +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum TryReserveError { + /// Error due to the computed capacity exceeding the collection's maximum + /// (usually `isize::MAX` bytes). + CapacityOverflow, + + /// The memory allocator returned an error + AllocError { + /// The layout of the allocation request that failed. + layout: alloc::alloc::Layout, + }, +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/macros.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/macros.rs new file mode 100644 index 000000000000..eaba6bed1fcb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/macros.rs @@ -0,0 +1,70 @@ +// See the cfg-if crate. +#[allow(unused_macro_rules)] +macro_rules! cfg_if { + // match if/else chains with a final `else` + ($( + if #[cfg($($meta:meta),*)] { $($it:item)* } + ) else * else { + $($it2:item)* + }) => { + cfg_if! { + @__items + () ; + $( ( ($($meta),*) ($($it)*) ), )* + ( () ($($it2)*) ), + } + }; + + // match if/else chains lacking a final `else` + ( + if #[cfg($($i_met:meta),*)] { $($i_it:item)* } + $( + else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } + )* + ) => { + cfg_if! { + @__items + () ; + ( ($($i_met),*) ($($i_it)*) ), + $( ( ($($e_met),*) ($($e_it)*) ), )* + ( () () ), + } + }; + + // Internal and recursive macro to emit all the items + // + // Collects all the negated cfgs in a list at the beginning and after the + // semicolon is all the remaining items + (@__items ($($not:meta,)*) ; ) => {}; + (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { + // Emit all items within one block, applying an appropriate #[cfg]. The + // #[cfg] will require all `$m` matchers specified and must also negate + // all previous matchers. + cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } + + // Recurse to emit all other items in `$rest`, and when we do so add all + // our `$m` matchers to the list of `$not` matchers as future emissions + // will have to negate everything we just matched as well. + cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } + }; + + // Internal macro to Apply a cfg attribute to a list of items + (@__apply $m:meta, $($it:item)*) => { + $(#[$m] $it)* + }; +} + +// Helper macro for specialization. This also helps avoid parse errors if the +// default fn syntax for specialization changes in the future. +#[cfg(feature = "nightly")] +macro_rules! default_fn { + (#[$($a:tt)*] $($tt:tt)*) => { + #[$($a)*] default $($tt)* + } +} +#[cfg(not(feature = "nightly"))] +macro_rules! 
default_fn { + ($($tt:tt)*) => { + $($tt)* + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/map.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/map.rs new file mode 100644 index 000000000000..1969bff5436c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/map.rs @@ -0,0 +1,6510 @@ +use crate::raw::{ + Allocator, Bucket, Global, RawDrain, RawExtractIf, RawIntoIter, RawIter, RawTable, +}; +use crate::{DefaultHashBuilder, Equivalent, TryReserveError}; +use core::borrow::Borrow; +use core::fmt::{self, Debug}; +use core::hash::{BuildHasher, Hash}; +use core::iter::FusedIterator; +use core::marker::PhantomData; +use core::mem; +use core::ops::Index; + +#[cfg(feature = "raw-entry")] +pub use crate::raw_entry::*; + +/// A hash map implemented with quadratic probing and SIMD lookup. +/// +/// The default hashing algorithm is currently [`foldhash`], though this is +/// subject to change at any point in the future. This hash function is very +/// fast for all types of keys, but this algorithm will typically *not* protect +/// against attacks such as HashDoS. +/// +/// The hashing algorithm can be replaced on a per-`HashMap` basis using the +/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many +/// alternative algorithms are available on crates.io, such as the [`fnv`] crate. +/// +/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although +/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`. +/// If you implement these yourself, it is important that the following +/// property holds: +/// +/// ```text +/// k1 == k2 -> hash(k1) == hash(k2) +/// ``` +/// +/// In other words, if two keys are equal, their hashes must be equal. +/// +/// It is a logic error for a key to be modified in such a way that the key's +/// hash, as determined by the [`Hash`] trait, or its equality, as determined by +/// the [`Eq`] trait, changes while it is in the map. This is normally only +/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. +/// +/// It is also a logic error for the [`Hash`] implementation of a key to panic. +/// This is generally only possible if the trait is implemented manually. If a +/// panic does occur then the contents of the `HashMap` may become corrupted and +/// some items may be dropped from the table. +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// // Type inference lets us omit an explicit type signature (which +/// // would be `HashMap` in this example). +/// let mut book_reviews = HashMap::new(); +/// +/// // Review some books. +/// book_reviews.insert( +/// "Adventures of Huckleberry Finn".to_string(), +/// "My favorite book.".to_string(), +/// ); +/// book_reviews.insert( +/// "Grimms' Fairy Tales".to_string(), +/// "Masterpiece.".to_string(), +/// ); +/// book_reviews.insert( +/// "Pride and Prejudice".to_string(), +/// "Very enjoyable.".to_string(), +/// ); +/// book_reviews.insert( +/// "The Adventures of Sherlock Holmes".to_string(), +/// "Eye lyked it alot.".to_string(), +/// ); +/// +/// // Check for a specific one. +/// // When collections store owned values (String), they can still be +/// // queried using references (&str). +/// if !book_reviews.contains_key("Les Misérables") { +/// println!("We've got {} reviews, but Les Misérables ain't one.", +/// book_reviews.len()); +/// } +/// +/// // oops, this review has a lot of spelling mistakes, let's delete it. 
+/// book_reviews.remove("The Adventures of Sherlock Holmes"); +/// +/// // Look up the values associated with some keys. +/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; +/// for &book in &to_find { +/// match book_reviews.get(book) { +/// Some(review) => println!("{}: {}", book, review), +/// None => println!("{} is unreviewed.", book) +/// } +/// } +/// +/// // Look up the value for a key (will panic if the key is not found). +/// println!("Review for Jane: {}", book_reviews["Pride and Prejudice"]); +/// +/// // Iterate over everything. +/// for (book, review) in &book_reviews { +/// println!("{}: \"{}\"", book, review); +/// } +/// ``` +/// +/// `HashMap` also implements an [`Entry API`](#method.entry), which allows +/// for more complex methods of getting, setting, updating and removing keys and +/// their values: +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// // type inference lets us omit an explicit type signature (which +/// // would be `HashMap<&str, u8>` in this example). +/// let mut player_stats = HashMap::new(); +/// +/// fn random_stat_buff() -> u8 { +/// // could actually return some random value here - let's just return +/// // some fixed value for now +/// 42 +/// } +/// +/// // insert a key only if it doesn't already exist +/// player_stats.entry("health").or_insert(100); +/// +/// // insert a key using a function that provides a new value only if it +/// // doesn't already exist +/// player_stats.entry("defence").or_insert_with(random_stat_buff); +/// +/// // update a key, guarding against the key possibly not being set +/// let stat = player_stats.entry("attack").or_insert(100); +/// *stat += random_stat_buff(); +/// ``` +/// +/// The easiest way to use `HashMap` with a custom key type is to derive [`Eq`] and [`Hash`]. +/// We must also derive [`PartialEq`]. +/// +/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html +/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html +/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html +/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html +/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html +/// [`default`]: #method.default +/// [`with_hasher`]: #method.with_hasher +/// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher +/// [`fnv`]: https://crates.io/crates/fnv +/// [`foldhash`]: https://crates.io/crates/foldhash +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// #[derive(Hash, Eq, PartialEq, Debug)] +/// struct Viking { +/// name: String, +/// country: String, +/// } +/// +/// impl Viking { +/// /// Creates a new Viking. +/// fn new(name: &str, country: &str) -> Viking { +/// Viking { name: name.to_string(), country: country.to_string() } +/// } +/// } +/// +/// // Use a HashMap to store the vikings' health points. +/// let mut vikings = HashMap::new(); +/// +/// vikings.insert(Viking::new("Einar", "Norway"), 25); +/// vikings.insert(Viking::new("Olaf", "Denmark"), 24); +/// vikings.insert(Viking::new("Harald", "Iceland"), 12); +/// +/// // Use derived implementation to print the status of the vikings. 
+/// for (viking, health) in &vikings { +/// println!("{:?} has {} hp", viking, health); +/// } +/// ``` +/// +/// A `HashMap` with fixed list of elements can be initialized from an array: +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let timber_resources: HashMap<&str, i32> = [("Norway", 100), ("Denmark", 50), ("Iceland", 10)] +/// .into_iter().collect(); +/// // use the values stored in map +/// ``` +pub struct HashMap { + pub(crate) hash_builder: S, + pub(crate) table: RawTable<(K, V), A>, +} + +impl Clone for HashMap { + fn clone(&self) -> Self { + HashMap { + hash_builder: self.hash_builder.clone(), + table: self.table.clone(), + } + } + + fn clone_from(&mut self, source: &Self) { + self.table.clone_from(&source.table); + + // Update hash_builder only if we successfully cloned all elements. + self.hash_builder.clone_from(&source.hash_builder); + } +} + +/// Ensures that a single closure type across uses of this which, in turn prevents multiple +/// instances of any functions like `RawTable::reserve` from being generated +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_hasher(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_ +where + Q: Hash, + S: BuildHasher, +{ + move |val| make_hash::(hash_builder, &val.0) +} + +/// Ensures that a single closure type across uses of this which, in turn prevents multiple +/// instances of any functions like `RawTable::reserve` from being generated +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn equivalent_key(k: &Q) -> impl Fn(&(K, V)) -> bool + '_ +where + Q: Equivalent + ?Sized, +{ + move |x| k.equivalent(&x.0) +} + +/// Ensures that a single closure type across uses of this which, in turn prevents multiple +/// instances of any functions like `RawTable::reserve` from being generated +#[cfg_attr(feature = "inline-more", inline)] +#[allow(dead_code)] +pub(crate) fn equivalent(k: &Q) -> impl Fn(&K) -> bool + '_ +where + Q: Equivalent + ?Sized, +{ + move |x| k.equivalent(x) +} + +#[cfg(not(feature = "nightly"))] +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 +where + Q: Hash + ?Sized, + S: BuildHasher, +{ + use core::hash::Hasher; + let mut state = hash_builder.build_hasher(); + val.hash(&mut state); + state.finish() +} + +#[cfg(feature = "nightly")] +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 +where + Q: Hash + ?Sized, + S: BuildHasher, +{ + hash_builder.hash_one(val) +} + +#[cfg(feature = "default-hasher")] +impl HashMap { + /// Creates an empty `HashMap`. + /// + /// The hash map is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`], for example with + /// [`with_hasher`](HashMap::with_hasher) method. 
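// A small standalone sketch (not part of the vendored file): the `make_hash`
// helper above simply runs the map's `BuildHasher` over the key; on stable
// Rust the same computation is available as `BuildHasher::hash_one`.
// `RandomState` below is only a stand-in hasher chosen for the example.
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    // `String` forwards `Hash` to `str`, so equal contents hash equally --
    // the property that lets a `String`-keyed map be probed with a `&str`.
    let h1 = s.hash_one("key");
    let h2 = s.hash_one(&String::from("key"));
    assert_eq!(h1, h2);
}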
+ /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// assert_eq!(map.len(), 0); + /// assert_eq!(map.capacity(), 0); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn new() -> Self { + Self::default() + } + + /// Creates an empty `HashMap` with the specified capacity. + /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`], for example with + /// [`with_capacity_and_hasher`](HashMap::with_capacity_and_hasher) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::with_capacity(10); + /// assert_eq!(map.len(), 0); + /// assert!(map.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_and_hasher(capacity, DefaultHashBuilder::default()) + } +} + +#[cfg(feature = "default-hasher")] +impl HashMap { + /// Creates an empty `HashMap` using the given allocator. + /// + /// The hash map is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`], for example with + /// [`with_hasher_in`](HashMap::with_hasher_in) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use bumpalo::Bump; + /// + /// let bump = Bump::new(); + /// let mut map = HashMap::new_in(&bump); + /// + /// // The created HashMap holds none elements + /// assert_eq!(map.len(), 0); + /// + /// // The created HashMap also doesn't allocate memory + /// assert_eq!(map.capacity(), 0); + /// + /// // Now we insert element inside created HashMap + /// map.insert("One", 1); + /// // We can see that the HashMap holds 1 element + /// assert_eq!(map.len(), 1); + /// // And it also allocates some capacity + /// assert!(map.capacity() > 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn new_in(alloc: A) -> Self { + Self::with_hasher_in(DefaultHashBuilder::default(), alloc) + } + + /// Creates an empty `HashMap` with the specified capacity using the given allocator. 
+ /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`], for example with + /// [`with_capacity_and_hasher_in`](HashMap::with_capacity_and_hasher_in) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use bumpalo::Bump; + /// + /// let bump = Bump::new(); + /// let mut map = HashMap::with_capacity_in(5, &bump); + /// + /// // The created HashMap holds none elements + /// assert_eq!(map.len(), 0); + /// // But it can hold at least 5 elements without reallocating + /// let empty_map_capacity = map.capacity(); + /// assert!(empty_map_capacity >= 5); + /// + /// // Now we insert some 5 elements inside created HashMap + /// map.insert("One", 1); + /// map.insert("Two", 2); + /// map.insert("Three", 3); + /// map.insert("Four", 4); + /// map.insert("Five", 5); + /// + /// // We can see that the HashMap holds 5 elements + /// assert_eq!(map.len(), 5); + /// // But its capacity isn't changed + /// assert_eq!(map.capacity(), empty_map_capacity) + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Self::with_capacity_and_hasher_in(capacity, DefaultHashBuilder::default(), alloc) + } +} + +impl HashMap { + /// Creates an empty `HashMap` which will use the given hash builder to hash + /// keys. + /// + /// The hash map is initially created with a capacity of 0, so it will not + /// allocate until it is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`]. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the `HashMap` to be useful, see its documentation for details. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_hasher(s); + /// assert_eq!(map.len(), 0); + /// assert_eq!(map.capacity(), 0); + /// + /// map.insert(1, 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub const fn with_hasher(hash_builder: S) -> Self { + Self { + hash_builder, + table: RawTable::new(), + } + } + + /// Creates an empty `HashMap` with the specified capacity, using `hash_builder` + /// to hash the keys. 
+ /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`]. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the `HashMap` to be useful, see its documentation for details. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_capacity_and_hasher(10, s); + /// assert_eq!(map.len(), 0); + /// assert!(map.capacity() >= 10); + /// + /// map.insert(1, 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { + Self { + hash_builder, + table: RawTable::with_capacity(capacity), + } + } +} + +impl HashMap { + /// Returns a reference to the underlying allocator. + #[inline] + pub fn allocator(&self) -> &A { + self.table.allocator() + } + + /// Creates an empty `HashMap` which will use the given hash builder to hash + /// keys. It will be allocated with the given allocator. + /// + /// The hash map is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`]. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_hasher(s); + /// map.insert(1, 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub const fn with_hasher_in(hash_builder: S, alloc: A) -> Self { + Self { + hash_builder, + table: RawTable::new_in(alloc), + } + } + + /// Creates an empty `HashMap` with the specified capacity, using `hash_builder` + /// to hash the keys. It will be allocated with the given allocator. + /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. 
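// Usage sketch for the HashDoS note above (assumes std is available alongside
// the vendored crate): a randomly seeded hasher such as
// `std::collections::hash_map::RandomState` can be supplied through the
// `*_hasher` constructors when untrusted keys are expected.
use hashbrown::HashMap;
use std::collections::hash_map::RandomState;

fn main() {
    let mut map: HashMap<String, u32, RandomState> =
        HashMap::with_capacity_and_hasher(16, RandomState::new());
    map.insert("untrusted input".to_string(), 1);
    assert_eq!(map["untrusted input"], 1);
}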
+ /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`]. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_capacity_and_hasher(10, s); + /// map.insert(1, 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_and_hasher_in(capacity: usize, hash_builder: S, alloc: A) -> Self { + Self { + hash_builder, + table: RawTable::with_capacity_in(capacity, alloc), + } + } + + /// Returns a reference to the map's [`BuildHasher`]. + /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::DefaultHashBuilder; + /// + /// let hasher = DefaultHashBuilder::default(); + /// let map: HashMap = HashMap::with_hasher(hasher); + /// let hasher: &DefaultHashBuilder = map.hasher(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn hasher(&self) -> &S { + &self.hash_builder + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// This number is a lower bound; the `HashMap` might be able to hold + /// more, but is guaranteed to be able to hold at least this many. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let map: HashMap = HashMap::with_capacity(100); + /// assert_eq!(map.len(), 0); + /// assert!(map.capacity() >= 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn capacity(&self) -> usize { + self.table.capacity() + } + + /// An iterator visiting all keys in arbitrary order. + /// The iterator element type is `&'a K`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec<&str> = Vec::new(); + /// + /// for key in map.keys() { + /// println!("{}", key); + /// vec.push(*key); + /// } + /// + /// // The `Keys` iterator produces keys in arbitrary order, so the + /// // keys must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, ["a", "b", "c"]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn keys(&self) -> Keys<'_, K, V> { + Keys { inner: self.iter() } + } + + /// An iterator visiting all values in arbitrary order. + /// The iterator element type is `&'a V`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec = Vec::new(); + /// + /// for val in map.values() { + /// println!("{}", val); + /// vec.push(*val); + /// } + /// + /// // The `Values` iterator produces values in arbitrary order, so the + /// // values must be sorted to test them against a sorted array. 
+ /// vec.sort_unstable(); + /// assert_eq!(vec, [1, 2, 3]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn values(&self) -> Values<'_, K, V> { + Values { inner: self.iter() } + } + + /// An iterator visiting all values mutably in arbitrary order. + /// The iterator element type is `&'a mut V`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// for val in map.values_mut() { + /// *val = *val + 10; + /// } + /// + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec = Vec::new(); + /// + /// for val in map.values() { + /// println!("{}", val); + /// vec.push(*val); + /// } + /// + /// // The `Values` iterator produces values in arbitrary order, so the + /// // values must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [11, 12, 13]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { + ValuesMut { + inner: self.iter_mut(), + } + } + + /// An iterator visiting all key-value pairs in arbitrary order. + /// The iterator element type is `(&'a K, &'a V)`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec<(&str, i32)> = Vec::new(); + /// + /// for (key, val) in map.iter() { + /// println!("key: {} val: {}", key, val); + /// vec.push((*key, *val)); + /// } + /// + /// // The `Iter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3)]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> Iter<'_, K, V> { + // Here we tie the lifetime of self to the iter. + unsafe { + Iter { + inner: self.table.iter(), + marker: PhantomData, + } + } + } + + /// An iterator visiting all key-value pairs in arbitrary order, + /// with mutable references to the values. + /// The iterator element type is `(&'a K, &'a mut V)`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// // Update all values + /// for (_, val) in map.iter_mut() { + /// *val *= 2; + /// } + /// + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec<(&str, i32)> = Vec::new(); + /// + /// for (key, val) in &map { + /// println!("key: {} val: {}", key, val); + /// vec.push((*key, *val)); + /// } + /// + /// // The `Iter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [("a", 2), ("b", 4), ("c", 6)]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + // Here we tie the lifetime of self to the iter. 
+ unsafe { + IterMut { + inner: self.table.iter(), + marker: PhantomData, + } + } + } + + #[cfg(test)] + #[cfg_attr(feature = "inline-more", inline)] + fn raw_capacity(&self) -> usize { + self.table.buckets() + } + + /// Returns the number of elements in the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// assert_eq!(a.len(), 0); + /// a.insert(1, "a"); + /// assert_eq!(a.len(), 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn len(&self) -> usize { + self.table.len() + } + + /// Returns `true` if the map contains no elements. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// assert!(a.is_empty()); + /// a.insert(1, "a"); + /// assert!(!a.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Clears the map, returning all key-value pairs as an iterator. Keeps the + /// allocated memory for reuse. + /// + /// If the returned iterator is dropped before being fully consumed, it + /// drops the remaining key-value pairs. The returned iterator keeps a + /// mutable borrow on the vector to optimize its implementation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// let capacity_before_drain = a.capacity(); + /// + /// for (k, v) in a.drain().take(1) { + /// assert!(k == 1 || k == 2); + /// assert!(v == "a" || v == "b"); + /// } + /// + /// // As we can see, the map is empty and contains no element. + /// assert!(a.is_empty() && a.len() == 0); + /// // But map capacity is equal to old one. + /// assert_eq!(a.capacity(), capacity_before_drain); + /// + /// let mut a = HashMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// + /// { // Iterator is dropped without being consumed. + /// let d = a.drain(); + /// } + /// + /// // But the map is empty even if we do not use Drain iterator. + /// assert!(a.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> Drain<'_, K, V, A> { + Drain { + inner: self.table.drain(), + } + } + + /// Retains only the elements specified by the predicate. Keeps the + /// allocated memory for reuse. + /// + /// In other words, remove all pairs `(k, v)` such that `f(&k, &mut v)` returns `false`. + /// The elements are visited in unsorted (and unspecified) order. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = (0..8).map(|x|(x, x*10)).collect(); + /// assert_eq!(map.len(), 8); + /// + /// map.retain(|&k, _| k % 2 == 0); + /// + /// // We can see, that the number of elements inside map is changed. + /// assert_eq!(map.len(), 4); + /// + /// let mut vec: Vec<(i32, i32)> = map.iter().map(|(&k, &v)| (k, v)).collect(); + /// vec.sort_unstable(); + /// assert_eq!(vec, [(0, 0), (2, 20), (4, 40), (6, 60)]); + /// ``` + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + // Here we only use `iter` as a temporary, preventing use-after-free + unsafe { + for item in self.table.iter() { + let &mut (ref key, ref mut value) = item.as_mut(); + if !f(key, value) { + self.table.erase(item); + } + } + } + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. 
+ /// + /// In other words, move all pairs `(k, v)` such that `f(&k, &mut v)` returns `true` out + /// into another iterator. + /// + /// Note that `extract_if` lets you mutate every value in the filter closure, regardless of + /// whether you choose to keep or remove it. + /// + /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating + /// or the iteration short-circuits, then the remaining elements will be retained. + /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. + /// + /// Keeps the allocated memory for reuse. + /// + /// [`retain()`]: HashMap::retain + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); + /// + /// let drained: HashMap = map.extract_if(|k, _v| k % 2 == 0).collect(); + /// + /// let mut evens = drained.keys().cloned().collect::>(); + /// let mut odds = map.keys().cloned().collect::>(); + /// evens.sort(); + /// odds.sort(); + /// + /// assert_eq!(evens, vec![0, 2, 4, 6]); + /// assert_eq!(odds, vec![1, 3, 5, 7]); + /// + /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); + /// + /// { // Iterator is dropped without being consumed. + /// let d = map.extract_if(|k, _v| k % 2 != 0); + /// } + /// + /// // ExtractIf was not exhausted, therefore no elements were drained. + /// assert_eq!(map.len(), 8); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, K, V, F, A> + where + F: FnMut(&K, &mut V) -> bool, + { + ExtractIf { + f, + inner: RawExtractIf { + iter: unsafe { self.table.iter() }, + table: &mut self.table, + }, + } + } + + /// Clears the map, removing all key-value pairs. Keeps the allocated memory + /// for reuse. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// a.insert(1, "a"); + /// let capacity_before_clear = a.capacity(); + /// + /// a.clear(); + /// + /// // Map is empty. + /// assert!(a.is_empty()); + /// // But map capacity is equal to old one. + /// assert_eq!(a.capacity(), capacity_before_clear); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + self.table.clear(); + } + + /// Creates a consuming iterator visiting all the keys in arbitrary order. + /// The map cannot be used after calling this. + /// The iterator element type is `K`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// let mut vec: Vec<&str> = map.into_keys().collect(); + /// + /// // The `IntoKeys` iterator produces keys in arbitrary order, so the + /// // keys must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, ["a", "b", "c"]); + /// ``` + #[inline] + pub fn into_keys(self) -> IntoKeys { + IntoKeys { + inner: self.into_iter(), + } + } + + /// Creates a consuming iterator visiting all the values in arbitrary order. + /// The map cannot be used after calling this. + /// The iterator element type is `V`. 
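// Standalone sketch contrasting `retain` and `extract_if` as documented above:
// `retain` keeps what the predicate accepts, while `extract_if` moves the
// accepted pairs out into an iterator and leaves the rest in place.
use hashbrown::HashMap;

fn main() {
    let mut a: HashMap<i32, i32> = (0..6).map(|x| (x, x * 10)).collect();
    a.retain(|&k, _| k % 2 == 0); // keep even keys: {0, 2, 4}
    assert_eq!(a.len(), 3);

    let mut b: HashMap<i32, i32> = (0..6).map(|x| (x, x * 10)).collect();
    let moved: HashMap<i32, i32> = b.extract_if(|&k, _| k % 2 == 0).collect();
    assert_eq!(moved.len(), 3); // even keys were drained out
    assert_eq!(b.len(), 3);     // odd keys remain in `b`
}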
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// let mut vec: Vec = map.into_values().collect(); + /// + /// // The `IntoValues` iterator produces values in arbitrary order, so + /// // the values must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [1, 2, 3]); + /// ``` + #[inline] + pub fn into_values(self) -> IntoValues { + IntoValues { + inner: self.into_iter(), + } + } +} + +impl HashMap +where + K: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the `HashMap`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`try_reserve`](HashMap::try_reserve) instead + /// if you want to handle memory allocation failure. + /// + /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// // Map is empty and doesn't allocate memory + /// assert_eq!(map.capacity(), 0); + /// + /// map.reserve(10); + /// + /// // And now map can hold at least 10 elements + /// assert!(map.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize) { + self.table + .reserve(additional, make_hasher::<_, V, S>(&self.hash_builder)); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashMap`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, isize> = HashMap::new(); + /// // Map is empty and doesn't allocate memory + /// assert_eq!(map.capacity(), 0); + /// + /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); + /// + /// // And now map can hold at least 10 elements + /// assert!(map.capacity() >= 10); + /// ``` + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned: + /// ``` + /// # fn test() { + /// use hashbrown::HashMap; + /// use hashbrown::TryReserveError; + /// let mut map: HashMap = HashMap::new(); + /// + /// match map.try_reserve(usize::MAX) { + /// Err(error) => match error { + /// TryReserveError::CapacityOverflow => {} + /// _ => panic!("TryReserveError::AllocError ?"), + /// }, + /// _ => panic!(), + /// } + /// # } + /// # fn main() { + /// # #[cfg(not(miri))] + /// # test() + /// # } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.table + .try_reserve(additional, make_hasher::<_, V, S>(&self.hash_builder)) + } + + /// Shrinks the capacity of the map as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. 
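// Standalone sketch: handling both variants of the `TryReserveError` defined
// earlier in this patch when sizing a map from an untrusted length, so an
// oversized request is reported instead of aborting the process.
use hashbrown::{HashMap, TryReserveError};

fn main() {
    let mut map: HashMap<u64, u64> = HashMap::new();
    match map.try_reserve(usize::MAX) {
        Err(TryReserveError::CapacityOverflow) => eprintln!("request overflowed"),
        Err(TryReserveError::AllocError { layout }) => {
            eprintln!("allocator refused {} bytes", layout.size())
        }
        Ok(()) => unreachable!("usize::MAX elements cannot be reserved"),
    }
}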
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::with_capacity(100); + /// map.insert(1, 2); + /// map.insert(3, 4); + /// assert!(map.capacity() >= 100); + /// map.shrink_to_fit(); + /// assert!(map.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to_fit(&mut self) { + self.table + .shrink_to(0, make_hasher::<_, V, S>(&self.hash_builder)); + } + + /// Shrinks the capacity of the map with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// This function does nothing if the current capacity is smaller than the + /// supplied minimum capacity. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::with_capacity(100); + /// map.insert(1, 2); + /// map.insert(3, 4); + /// assert!(map.capacity() >= 100); + /// map.shrink_to(10); + /// assert!(map.capacity() >= 10); + /// map.shrink_to(0); + /// assert!(map.capacity() >= 2); + /// map.shrink_to(10); + /// assert!(map.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.table + .shrink_to(min_capacity, make_hasher::<_, V, S>(&self.hash_builder)); + } + + /// Gets the given key's corresponding entry in the map for in-place manipulation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut letters = HashMap::new(); + /// + /// for ch in "a short treatise on fungi".chars() { + /// let counter = letters.entry(ch).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(letters[&'s'], 2); + /// assert_eq!(letters[&'t'], 3); + /// assert_eq!(letters[&'u'], 1); + /// assert_eq!(letters.get(&'y'), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S, A> { + let hash = make_hash::(&self.hash_builder, &key); + if let Some(elem) = self.table.find(hash, equivalent_key(&key)) { + Entry::Occupied(OccupiedEntry { + hash, + elem, + table: self, + }) + } else { + Entry::Vacant(VacantEntry { + hash, + key, + table: self, + }) + } + } + + /// Gets the given key's corresponding entry by reference in the map for in-place manipulation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut words: HashMap = HashMap::new(); + /// let source = ["poneyland", "horseyland", "poneyland", "poneyland"]; + /// for (i, &s) in source.iter().enumerate() { + /// let counter = words.entry_ref(s).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(words["poneyland"], 3); + /// assert_eq!(words["horseyland"], 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn entry_ref<'a, 'b, Q>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S, A> + where + Q: Hash + Equivalent + ?Sized, + { + let hash = make_hash::(&self.hash_builder, key); + if let Some(elem) = self.table.find(hash, equivalent_key(key)) { + EntryRef::Occupied(OccupiedEntry { + hash, + elem, + table: self, + }) + } else { + EntryRef::Vacant(VacantEntryRef { + hash, + key, + table: self, + }) + } + } + + /// Returns a reference to the value corresponding to the key. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. 
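// Sketch of the `entry` API shown above; the returned `Entry` also offers
// `and_modify` in the upstream crate, which updates an existing value before
// `or_insert` supplies the default for a missing one. Names are illustrative.
use hashbrown::HashMap;

fn main() {
    let mut counts: HashMap<&str, u32> = HashMap::new();
    for word in ["a", "b", "a"] {
        counts.entry(word).and_modify(|c| *c += 1).or_insert(1);
    }
    assert_eq!(counts["a"], 2);
    assert_eq!(counts["b"], 1);
}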
+ /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.get(&1), Some(&"a")); + /// assert_eq!(map.get(&2), None); + /// ``` + #[inline] + pub fn get(&self, k: &Q) -> Option<&V> + where + Q: Hash + Equivalent + ?Sized, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner(k) { + Some((_, v)) => Some(v), + None => None, + } + } + + /// Returns the key-value pair corresponding to the supplied key. + /// + /// The supplied key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.get_key_value(&1), Some((&1, &"a"))); + /// assert_eq!(map.get_key_value(&2), None); + /// ``` + #[inline] + pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> + where + Q: Hash + Equivalent + ?Sized, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner(k) { + Some((key, value)) => Some((key, value)), + None => None, + } + } + + #[inline] + fn get_inner(&self, k: &Q) -> Option<&(K, V)> + where + Q: Hash + Equivalent + ?Sized, + { + if self.table.is_empty() { + None + } else { + let hash = make_hash::(&self.hash_builder, k); + self.table.get(hash, equivalent_key(k)) + } + } + + /// Returns the key-value pair corresponding to the supplied key, with a mutable reference to value. + /// + /// The supplied key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// let (k, v) = map.get_key_value_mut(&1).unwrap(); + /// assert_eq!(k, &1); + /// assert_eq!(v, &mut "a"); + /// *v = "b"; + /// assert_eq!(map.get_key_value_mut(&1), Some((&1, &mut "b"))); + /// assert_eq!(map.get_key_value_mut(&2), None); + /// ``` + #[inline] + pub fn get_key_value_mut(&mut self, k: &Q) -> Option<(&K, &mut V)> + where + Q: Hash + Equivalent + ?Sized, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner_mut(k) { + Some(&mut (ref key, ref mut value)) => Some((key, value)), + None => None, + } + } + + /// Returns `true` if the map contains a value for the specified key. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. 
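// Standalone sketch of the "any borrowed form" wording above: a map keyed by
// `String` can be queried with a plain `&str`, because `str` and `String`
// agree on `Hash`/`Eq` (see the `Equivalent` blanket impl earlier in this
// patch), and no temporary `String` allocation is needed for the lookups.
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<String, i32> = HashMap::new();
    map.insert("alpha".to_string(), 1);

    assert_eq!(map.get("alpha"), Some(&1));
    assert!(map.contains_key("alpha"));
    assert_eq!(map.get_key_value("alpha"), Some((&"alpha".to_string(), &1)));
}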
+ /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.contains_key(&1), true); + /// assert_eq!(map.contains_key(&2), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn contains_key(&self, k: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.get_inner(k).is_some() + } + + /// Returns a mutable reference to the value corresponding to the key. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// if let Some(x) = map.get_mut(&1) { + /// *x = "b"; + /// } + /// assert_eq!(map[&1], "b"); + /// + /// assert_eq!(map.get_mut(&2), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> + where + Q: Hash + Equivalent + ?Sized, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner_mut(k) { + Some(&mut (_, ref mut v)) => Some(v), + None => None, + } + } + + #[inline] + fn get_inner_mut(&mut self, k: &Q) -> Option<&mut (K, V)> + where + Q: Hash + Equivalent + ?Sized, + { + if self.table.is_empty() { + None + } else { + let hash = make_hash::(&self.hash_builder, k); + self.table.get_mut(hash, equivalent_key(k)) + } + } + + /// Attempts to get mutable references to `N` values in the map at once. + /// + /// Returns an array of length `N` with the results of each query. For soundness, at most one + /// mutable reference will be returned to any value. `None` will be used if the key is missing. + /// + /// # Panics + /// + /// Panics if any keys are overlapping. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Athenæum".to_string(), 1807); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// libraries.insert("Library of Congress".to_string(), 1800); + /// + /// // Get Athenæum and Bodleian Library + /// let [Some(a), Some(b)] = libraries.get_many_mut([ + /// "Athenæum", + /// "Bodleian Library", + /// ]) else { panic!() }; + /// + /// // Assert values of Athenæum and Library of Congress + /// let got = libraries.get_many_mut([ + /// "Athenæum", + /// "Library of Congress", + /// ]); + /// assert_eq!( + /// got, + /// [ + /// Some(&mut 1807), + /// Some(&mut 1800), + /// ], + /// ); + /// + /// // Missing keys result in None + /// let got = libraries.get_many_mut([ + /// "Athenæum", + /// "New York Public Library", + /// ]); + /// assert_eq!( + /// got, + /// [ + /// Some(&mut 1807), + /// None + /// ] + /// ); + /// ``` + /// + /// ```should_panic + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Athenæum".to_string(), 1807); + /// + /// // Duplicate keys panic! 
+ /// let got = libraries.get_many_mut([ + /// "Athenæum", + /// "Athenæum", + /// ]); + /// ``` + pub fn get_many_mut(&mut self, ks: [&Q; N]) -> [Option<&'_ mut V>; N] + where + Q: Hash + Equivalent + ?Sized, + { + self.get_many_mut_inner(ks).map(|res| res.map(|(_, v)| v)) + } + + /// Attempts to get mutable references to `N` values in the map at once, without validating that + /// the values are unique. + /// + /// Returns an array of length `N` with the results of each query. `None` will be used if + /// the key is missing. + /// + /// For a safe alternative see [`get_many_mut`](`HashMap::get_many_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. + /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Athenæum".to_string(), 1807); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// libraries.insert("Library of Congress".to_string(), 1800); + /// + /// // SAFETY: The keys do not overlap. + /// let [Some(a), Some(b)] = (unsafe { libraries.get_many_unchecked_mut([ + /// "Athenæum", + /// "Bodleian Library", + /// ]) }) else { panic!() }; + /// + /// // SAFETY: The keys do not overlap. + /// let got = unsafe { libraries.get_many_unchecked_mut([ + /// "Athenæum", + /// "Library of Congress", + /// ]) }; + /// assert_eq!( + /// got, + /// [ + /// Some(&mut 1807), + /// Some(&mut 1800), + /// ], + /// ); + /// + /// // SAFETY: The keys do not overlap. + /// let got = unsafe { libraries.get_many_unchecked_mut([ + /// "Athenæum", + /// "New York Public Library", + /// ]) }; + /// // Missing keys result in None + /// assert_eq!(got, [Some(&mut 1807), None]); + /// ``` + pub unsafe fn get_many_unchecked_mut( + &mut self, + ks: [&Q; N], + ) -> [Option<&'_ mut V>; N] + where + Q: Hash + Equivalent + ?Sized, + { + self.get_many_unchecked_mut_inner(ks) + .map(|res| res.map(|(_, v)| v)) + } + + /// Attempts to get mutable references to `N` values in the map at once, with immutable + /// references to the corresponding keys. + /// + /// Returns an array of length `N` with the results of each query. For soundness, at most one + /// mutable reference will be returned to any value. `None` will be used if the key is missing. + /// + /// # Panics + /// + /// Panics if any keys are overlapping. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Athenæum".to_string(), 1807); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// libraries.insert("Library of Congress".to_string(), 1800); + /// + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Herzogin-Anna-Amalia-Bibliothek", + /// ]); + /// assert_eq!( + /// got, + /// [ + /// Some((&"Bodleian Library".to_string(), &mut 1602)), + /// Some((&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691)), + /// ], + /// ); + /// // Missing keys result in None + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Gewandhaus", + /// ]); + /// assert_eq!(got, [Some((&"Bodleian Library".to_string(), &mut 1602)), None]); + /// ``` + /// + /// ```should_panic + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// + /// // Duplicate keys result in panic! + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Herzogin-Anna-Amalia-Bibliothek", + /// "Herzogin-Anna-Amalia-Bibliothek", + /// ]); + /// ``` + pub fn get_many_key_value_mut( + &mut self, + ks: [&Q; N], + ) -> [Option<(&'_ K, &'_ mut V)>; N] + where + Q: Hash + Equivalent + ?Sized, + { + self.get_many_mut_inner(ks) + .map(|res| res.map(|(k, v)| (&*k, v))) + } + + /// Attempts to get mutable references to `N` values in the map at once, with immutable + /// references to the corresponding keys, without validating that the values are unique. + /// + /// Returns an array of length `N` with the results of each query. `None` will be returned if + /// any of the keys are missing. + /// + /// For a safe alternative see [`get_many_key_value_mut`](`HashMap::get_many_key_value_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. 
+ /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Athenæum".to_string(), 1807); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// libraries.insert("Library of Congress".to_string(), 1800); + /// + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Herzogin-Anna-Amalia-Bibliothek", + /// ]); + /// assert_eq!( + /// got, + /// [ + /// Some((&"Bodleian Library".to_string(), &mut 1602)), + /// Some((&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691)), + /// ], + /// ); + /// // Missing keys result in None + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Gewandhaus", + /// ]); + /// assert_eq!( + /// got, + /// [ + /// Some((&"Bodleian Library".to_string(), &mut 1602)), + /// None, + /// ], + /// ); + /// ``` + pub unsafe fn get_many_key_value_unchecked_mut( + &mut self, + ks: [&Q; N], + ) -> [Option<(&'_ K, &'_ mut V)>; N] + where + Q: Hash + Equivalent + ?Sized, + { + self.get_many_unchecked_mut_inner(ks) + .map(|res| res.map(|(k, v)| (&*k, v))) + } + + fn get_many_mut_inner(&mut self, ks: [&Q; N]) -> [Option<&'_ mut (K, V)>; N] + where + Q: Hash + Equivalent + ?Sized, + { + let hashes = self.build_hashes_inner(ks); + self.table + .get_many_mut(hashes, |i, (k, _)| ks[i].equivalent(k)) + } + + unsafe fn get_many_unchecked_mut_inner( + &mut self, + ks: [&Q; N], + ) -> [Option<&'_ mut (K, V)>; N] + where + Q: Hash + Equivalent + ?Sized, + { + let hashes = self.build_hashes_inner(ks); + self.table + .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].equivalent(k)) + } + + fn build_hashes_inner(&self, ks: [&Q; N]) -> [u64; N] + where + Q: Hash + Equivalent + ?Sized, + { + let mut hashes = [0_u64; N]; + for i in 0..N { + hashes[i] = make_hash::(&self.hash_builder, ks[i]); + } + hashes + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not have this key present, [`None`] is returned. + /// + /// If the map did have this key present, the value is updated, and the old + /// value is returned. The key is not updated, though; this matters for + /// types that can be `==` without being identical. See the [`std::collections`] + /// [module-level documentation] for more. 
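// Standalone sketch of `get_many_mut` as documented above: two disjoint
// mutable borrows taken in one call, used here to swap the values in place.
// Overlapping keys would panic; missing ones come back as `None`.
use hashbrown::HashMap;

fn main() {
    let mut scores: HashMap<&str, i32> = HashMap::new();
    scores.insert("left", 1);
    scores.insert("right", 2);

    if let [Some(l), Some(r)] = scores.get_many_mut(["left", "right"]) {
        core::mem::swap(l, r);
    }
    assert_eq!(scores["left"], 2);
    assert_eq!(scores["right"], 1);
}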
+ /// + /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None + /// [`std::collections`]: https://doc.rust-lang.org/std/collections/index.html + /// [module-level documentation]: https://doc.rust-lang.org/std/collections/index.html#insert-and-complex-keys + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// assert_eq!(map.insert(37, "a"), None); + /// assert_eq!(map.is_empty(), false); + /// + /// map.insert(37, "b"); + /// assert_eq!(map.insert(37, "c"), Some("b")); + /// assert_eq!(map[&37], "c"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, k: K, v: V) -> Option { + let hash = make_hash::(&self.hash_builder, &k); + match self.find_or_find_insert_slot(hash, &k) { + Ok(bucket) => Some(mem::replace(unsafe { &mut bucket.as_mut().1 }, v)), + Err(slot) => { + unsafe { + self.table.insert_in_slot(hash, slot, (k, v)); + } + None + } + } + } + + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) fn find_or_find_insert_slot( + &mut self, + hash: u64, + key: &Q, + ) -> Result, crate::raw::InsertSlot> + where + Q: Equivalent + ?Sized, + { + self.table.find_or_find_insert_slot( + hash, + equivalent_key(key), + make_hasher(&self.hash_builder), + ) + } + + /// Insert a key-value pair into the map without checking + /// if the key already exists in the map. + /// + /// This operation is faster than regular insert, because it does not perform + /// lookup before insertion. + /// + /// This operation is useful during initial population of the map. + /// For example, when constructing a map from another map, we know + /// that keys are unique. + /// + /// Returns a reference to the key and value just inserted. + /// + /// # Safety + /// + /// This operation is safe if a key does not exist in the map. + /// + /// However, if a key exists in the map already, the behavior is unspecified: + /// this operation may panic, loop forever, or any following operation with the map + /// may panic, loop forever or return arbitrary result. + /// + /// That said, this operation (and following operations) are guaranteed to + /// not violate memory safety. + /// + /// However this operation is still unsafe because the resulting `HashMap` + /// may be passed to unsafe code which does expect the map to behave + /// correctly, and would cause unsoundness as a result. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map1 = HashMap::new(); + /// assert_eq!(map1.insert(1, "a"), None); + /// assert_eq!(map1.insert(2, "b"), None); + /// assert_eq!(map1.insert(3, "c"), None); + /// assert_eq!(map1.len(), 3); + /// + /// let mut map2 = HashMap::new(); + /// + /// for (key, value) in map1.into_iter() { + /// unsafe { + /// map2.insert_unique_unchecked(key, value); + /// } + /// } + /// + /// let (key, value) = unsafe { map2.insert_unique_unchecked(4, "d") }; + /// assert_eq!(key, &4); + /// assert_eq!(value, &mut "d"); + /// *value = "e"; + /// + /// assert_eq!(map2[&1], "a"); + /// assert_eq!(map2[&2], "b"); + /// assert_eq!(map2[&3], "c"); + /// assert_eq!(map2[&4], "e"); + /// assert_eq!(map2.len(), 4); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn insert_unique_unchecked(&mut self, k: K, v: V) -> (&K, &mut V) { + let hash = make_hash::(&self.hash_builder, &k); + let bucket = self + .table + .insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder)); + let (k_ref, v_ref) = unsafe { bucket.as_mut() }; + (k_ref, v_ref) + } + + /// Tries to insert a key-value pair into the map, and returns + /// a mutable reference to the value in the entry. + /// + /// # Errors + /// + /// If the map already had this key present, nothing is updated, and + /// an error containing the occupied entry and the value is returned. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::OccupiedError; + /// + /// let mut map = HashMap::new(); + /// assert_eq!(map.try_insert(37, "a").unwrap(), &"a"); + /// + /// match map.try_insert(37, "b") { + /// Err(OccupiedError { entry, value }) => { + /// assert_eq!(entry.key(), &37); + /// assert_eq!(entry.get(), &"a"); + /// assert_eq!(value, "b"); + /// } + /// _ => panic!() + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_insert( + &mut self, + key: K, + value: V, + ) -> Result<&mut V, OccupiedError<'_, K, V, S, A>> { + match self.entry(key) { + Entry::Occupied(entry) => Err(OccupiedError { entry, value }), + Entry::Vacant(entry) => Ok(entry.insert(value)), + } + } + + /// Removes a key from the map, returning the value at the key if the key + /// was previously in the map. Keeps the allocated memory for reuse. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// // The map is empty + /// assert!(map.is_empty() && map.capacity() == 0); + /// + /// map.insert(1, "a"); + /// + /// assert_eq!(map.remove(&1), Some("a")); + /// assert_eq!(map.remove(&1), None); + /// + /// // Now map holds none elements + /// assert!(map.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(&mut self, k: &Q) -> Option + where + Q: Hash + Equivalent + ?Sized, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.remove_entry(k) { + Some((_, v)) => Some(v), + None => None, + } + } + + /// Removes a key from the map, returning the stored key and value if the + /// key was previously in the map. Keeps the allocated memory for reuse. 
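// Standalone sketch of `try_insert` as documented above: it refuses to
// overwrite an existing entry and hands back both the occupied entry and the
// rejected value.
use hashbrown::hash_map::OccupiedError;
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u32, &str> = HashMap::new();
    assert_eq!(map.try_insert(1, "first").unwrap(), &"first");

    match map.try_insert(1, "second") {
        Err(OccupiedError { entry, value }) => {
            assert_eq!(entry.get(), &"first"); // existing value untouched
            assert_eq!(value, "second");       // rejected value handed back
        }
        Ok(_) => unreachable!("key 1 is already present"),
    }
}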
+ /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// // The map is empty + /// assert!(map.is_empty() && map.capacity() == 0); + /// + /// map.insert(1, "a"); + /// + /// assert_eq!(map.remove_entry(&1), Some((1, "a"))); + /// assert_eq!(map.remove(&1), None); + /// + /// // Now map hold none elements + /// assert!(map.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> + where + Q: Hash + Equivalent + ?Sized, + { + let hash = make_hash::(&self.hash_builder, k); + self.table.remove_entry(hash, equivalent_key(k)) + } + + /// Returns the total amount of memory allocated internally by the hash + /// set, in bytes. + /// + /// The returned number is informational only. It is intended to be + /// primarily used for memory profiling. + #[inline] + pub fn allocation_size(&self) -> usize { + self.table.allocation_size() + } +} + +impl PartialEq for HashMap +where + K: Eq + Hash, + V: PartialEq, + S: BuildHasher, + A: Allocator, +{ + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false; + } + + self.iter() + .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + } +} + +impl Eq for HashMap +where + K: Eq + Hash, + V: Eq, + S: BuildHasher, + A: Allocator, +{ +} + +impl Debug for HashMap +where + K: Debug, + V: Debug, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_map().entries(self.iter()).finish() + } +} + +impl Default for HashMap +where + S: Default, + A: Default + Allocator, +{ + /// Creates an empty `HashMap`, with the `Default` value for the hasher and allocator. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use std::collections::hash_map::RandomState; + /// + /// // You can specify all types of HashMap, including hasher and allocator. + /// // Created map is empty and don't allocate memory + /// let map: HashMap = Default::default(); + /// assert_eq!(map.capacity(), 0); + /// let map: HashMap = HashMap::default(); + /// assert_eq!(map.capacity(), 0); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self::with_hasher_in(Default::default(), Default::default()) + } +} + +impl Index<&Q> for HashMap +where + K: Eq + Hash, + Q: Hash + Equivalent + ?Sized, + S: BuildHasher, + A: Allocator, +{ + type Output = V; + + /// Returns a reference to the value corresponding to the supplied key. + /// + /// # Panics + /// + /// Panics if the key is not present in the `HashMap`. 
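// Standalone sketch of the `PartialEq` impl above: equality is checked by
// key/value lookup, so two maps holding the same pairs compare equal no
// matter the insertion order, and `Debug` prints entries in arbitrary order.
use hashbrown::HashMap;

fn main() {
    let a: HashMap<&str, i32> = [("x", 1), ("y", 2)].into_iter().collect();
    let b: HashMap<&str, i32> = [("y", 2), ("x", 1)].into_iter().collect();
    assert_eq!(a, b);
    println!("{a:?}");
}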
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let map: HashMap<_, _> = [("a", "One"), ("b", "Two")].into(); + /// + /// assert_eq!(map[&"a"], "One"); + /// assert_eq!(map[&"b"], "Two"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn index(&self, key: &Q) -> &V { + self.get(key).expect("no entry found for key") + } +} + +// The default hasher is used to match the std implementation signature +#[cfg(feature = "default-hasher")] +impl From<[(K, V); N]> for HashMap +where + K: Eq + Hash, + A: Default + Allocator, +{ + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let map1 = HashMap::from([(1, 2), (3, 4)]); + /// let map2: HashMap<_, _> = [(1, 2), (3, 4)].into(); + /// assert_eq!(map1, map2); + /// ``` + fn from(arr: [(K, V); N]) -> Self { + arr.into_iter().collect() + } +} + +/// An iterator over the entries of a `HashMap` in arbitrary order. +/// The iterator element type is `(&'a K, &'a V)`. +/// +/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`iter`]: struct.HashMap.html#method.iter +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut iter = map.iter(); +/// let mut vec = vec![iter.next(), iter.next(), iter.next()]; +/// +/// // The `Iter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some((&1, &"a")), Some((&2, &"b")), Some((&3, &"c"))]); +/// +/// // It is fused iterator +/// assert_eq!(iter.next(), None); +/// assert_eq!(iter.next(), None); +/// ``` +pub struct Iter<'a, K, V> { + inner: RawIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Iter<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Iter { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for Iter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A mutable iterator over the entries of a `HashMap` in arbitrary order. +/// The iterator element type is `(&'a K, &'a mut V)`. +/// +/// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`iter_mut`]: struct.HashMap.html#method.iter_mut +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let mut map: HashMap<_, _> = [(1, "One".to_owned()), (2, "Two".into())].into(); +/// +/// let mut iter = map.iter_mut(); +/// iter.next().map(|(_, v)| v.push_str(" Mississippi")); +/// iter.next().map(|(_, v)| v.push_str(" Mississippi")); +/// +/// // It is fused iterator +/// assert_eq!(iter.next(), None); +/// assert_eq!(iter.next(), None); +/// +/// assert_eq!(map.get(&1).unwrap(), &"One Mississippi".to_owned()); +/// assert_eq!(map.get(&2).unwrap(), &"Two Mississippi".to_owned()); +/// ``` +pub struct IterMut<'a, K, V> { + inner: RawIter<(K, V)>, + // To ensure invariance with respect to V + marker: PhantomData<(&'a K, &'a mut V)>, +} + +// We override the default Send impl which has K: Sync instead of K: Send. Both +// are correct, but this one is more general since it allows keys which +// implement Send but not Sync. 
+unsafe impl<K: Send, V: Send> Send for IterMut<'_, K, V> {}
+
+impl<K, V> IterMut<'_, K, V> {
+    /// Returns an iterator of references over the remaining items.
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub(super) fn iter(&self) -> Iter<'_, K, V> {
+        Iter {
+            inner: self.inner.clone(),
+            marker: PhantomData,
+        }
+    }
+}
+
+/// An owning iterator over the entries of a `HashMap` in arbitrary order.
+/// The iterator element type is `(K, V)`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashMap`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+/// The map cannot be used after calling that method.
+///
+/// [`into_iter`]: struct.HashMap.html#method.into_iter
+/// [`HashMap`]: struct.HashMap.html
+/// [`IntoIterator`]: https://doc.rust-lang.org/core/iter/trait.IntoIterator.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::HashMap;
+///
+/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into();
+///
+/// let mut iter = map.into_iter();
+/// let mut vec = vec![iter.next(), iter.next(), iter.next()];
+///
+/// // The `IntoIter` iterator produces items in arbitrary order, so the
+/// // items must be sorted to test them against a sorted array.
+/// vec.sort_unstable();
+/// assert_eq!(vec, [Some((1, "a")), Some((2, "b")), Some((3, "c"))]);
+///
+/// // It is a fused iterator
+/// assert_eq!(iter.next(), None);
+/// assert_eq!(iter.next(), None);
+/// ```
+pub struct IntoIter<K, V, A: Allocator = Global> {
+    inner: RawIntoIter<(K, V), A>,
+}
+
+impl<K, V, A: Allocator> IntoIter<K, V, A> {
+    /// Returns an iterator of references over the remaining items.
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub(super) fn iter(&self) -> Iter<'_, K, V> {
+        Iter {
+            inner: self.inner.iter(),
+            marker: PhantomData,
+        }
+    }
+}
+
+/// An owning iterator over the keys of a `HashMap` in arbitrary order.
+/// The iterator element type is `K`.
+///
+/// This `struct` is created by the [`into_keys`] method on [`HashMap`].
+/// See its documentation for more.
+/// The map cannot be used after calling that method.
+///
+/// [`into_keys`]: struct.HashMap.html#method.into_keys
+/// [`HashMap`]: struct.HashMap.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::HashMap;
+///
+/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into();
+///
+/// let mut keys = map.into_keys();
+/// let mut vec = vec![keys.next(), keys.next(), keys.next()];
+///
+/// // The `IntoKeys` iterator produces keys in arbitrary order, so the
+/// // keys must be sorted to test them against a sorted array.
+/// vec.sort_unstable(); +/// assert_eq!(vec, [Some(1), Some(2), Some(3)]); +/// +/// // It is fused iterator +/// assert_eq!(keys.next(), None); +/// assert_eq!(keys.next(), None); +/// ``` +pub struct IntoKeys { + inner: IntoIter, +} + +impl Default for IntoKeys { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + inner: Default::default(), + } + } +} +impl Iterator for IntoKeys { + type Item = K; + + #[inline] + fn next(&mut self) -> Option { + self.inner.next().map(|(k, _)| k) + } + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[inline] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (k, _)| f(acc, k)) + } +} + +impl ExactSizeIterator for IntoKeys { + #[inline] + fn len(&self) -> usize { + self.inner.len() + } +} + +impl FusedIterator for IntoKeys {} + +impl fmt::Debug for IntoKeys { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.inner.iter().map(|(k, _)| k)) + .finish() + } +} + +/// An owning iterator over the values of a `HashMap` in arbitrary order. +/// The iterator element type is `V`. +/// +/// This `struct` is created by the [`into_values`] method on [`HashMap`]. +/// See its documentation for more. The map cannot be used after calling that method. +/// +/// [`into_values`]: struct.HashMap.html#method.into_values +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut values = map.into_values(); +/// let mut vec = vec![values.next(), values.next(), values.next()]; +/// +/// // The `IntoValues` iterator produces values in arbitrary order, so +/// // the values must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some("a"), Some("b"), Some("c")]); +/// +/// // It is fused iterator +/// assert_eq!(values.next(), None); +/// assert_eq!(values.next(), None); +/// ``` +pub struct IntoValues { + inner: IntoIter, +} + +impl Default for IntoValues { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + inner: Default::default(), + } + } +} +impl Iterator for IntoValues { + type Item = V; + + #[inline] + fn next(&mut self) -> Option { + self.inner.next().map(|(_, v)| v) + } + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[inline] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (_, v)| f(acc, v)) + } +} + +impl ExactSizeIterator for IntoValues { + #[inline] + fn len(&self) -> usize { + self.inner.len() + } +} + +impl FusedIterator for IntoValues {} + +impl fmt::Debug for IntoValues { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.inner.iter().map(|(_, v)| v)) + .finish() + } +} + +/// An iterator over the keys of a `HashMap` in arbitrary order. +/// The iterator element type is `&'a K`. +/// +/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its +/// documentation for more. 
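A short sketch of the owning `into_keys`/`into_values` iterators documented in this hunk (not part of the patch; the `clone` is only there so both can be shown on one map):

```
use hashbrown::HashMap;

fn main() {
    let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into();

    // `into_keys` consumes the map and yields owned keys in arbitrary order.
    let mut keys: Vec<_> = map.clone().into_keys().collect();
    keys.sort_unstable();
    assert_eq!(keys, [1, 2, 3]);

    // `into_values` does the same for the values.
    let mut values: Vec<_> = map.into_values().collect();
    values.sort_unstable();
    assert_eq!(values, ["a", "b", "c"]);
}
```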
+/// +/// [`keys`]: struct.HashMap.html#method.keys +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut keys = map.keys(); +/// let mut vec = vec![keys.next(), keys.next(), keys.next()]; +/// +/// // The `Keys` iterator produces keys in arbitrary order, so the +/// // keys must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some(&1), Some(&2), Some(&3)]); +/// +/// // It is fused iterator +/// assert_eq!(keys.next(), None); +/// assert_eq!(keys.next(), None); +/// ``` +pub struct Keys<'a, K, V> { + inner: Iter<'a, K, V>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Keys<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Keys { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for Keys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// An iterator over the values of a `HashMap` in arbitrary order. +/// The iterator element type is `&'a V`. +/// +/// This `struct` is created by the [`values`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`values`]: struct.HashMap.html#method.values +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut values = map.values(); +/// let mut vec = vec![values.next(), values.next(), values.next()]; +/// +/// // The `Values` iterator produces values in arbitrary order, so the +/// // values must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some(&"a"), Some(&"b"), Some(&"c")]); +/// +/// // It is fused iterator +/// assert_eq!(values.next(), None); +/// assert_eq!(values.next(), None); +/// ``` +pub struct Values<'a, K, V> { + inner: Iter<'a, K, V>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Values<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Values { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for Values<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A draining iterator over the entries of a `HashMap` in arbitrary +/// order. The iterator element type is `(K, V)`. +/// +/// This `struct` is created by the [`drain`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`drain`]: struct.HashMap.html#method.drain +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let mut map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut drain_iter = map.drain(); +/// let mut vec = vec![drain_iter.next(), drain_iter.next(), drain_iter.next()]; +/// +/// // The `Drain` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. 
+/// vec.sort_unstable(); +/// assert_eq!(vec, [Some((1, "a")), Some((2, "b")), Some((3, "c"))]); +/// +/// // It is fused iterator +/// assert_eq!(drain_iter.next(), None); +/// assert_eq!(drain_iter.next(), None); +/// ``` +pub struct Drain<'a, K, V, A: Allocator = Global> { + inner: RawDrain<'a, (K, V), A>, +} + +impl Drain<'_, K, V, A> { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + inner: self.inner.iter(), + marker: PhantomData, + } + } +} + +/// A draining iterator over entries of a `HashMap` which don't satisfy the predicate +/// `f(&k, &mut v)` in arbitrary order. The iterator element type is `(K, V)`. +/// +/// This `struct` is created by the [`extract_if`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`extract_if`]: struct.HashMap.html#method.extract_if +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let mut map: HashMap = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut extract_if = map.extract_if(|k, _v| k % 2 != 0); +/// let mut vec = vec![extract_if.next(), extract_if.next()]; +/// +/// // The `ExtractIf` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some((1, "a")),Some((3, "c"))]); +/// +/// // It is fused iterator +/// assert_eq!(extract_if.next(), None); +/// assert_eq!(extract_if.next(), None); +/// drop(extract_if); +/// +/// assert_eq!(map.len(), 1); +/// ``` +#[must_use = "Iterators are lazy unless consumed"] +pub struct ExtractIf<'a, K, V, F, A: Allocator = Global> +where + F: FnMut(&K, &mut V) -> bool, +{ + f: F, + inner: RawExtractIf<'a, (K, V), A>, +} + +impl Iterator for ExtractIf<'_, K, V, F, A> +where + F: FnMut(&K, &mut V) -> bool, + A: Allocator, +{ + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + self.inner.next(|&mut (ref k, ref mut v)| (self.f)(k, v)) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (0, self.inner.iter.size_hint().1) + } +} + +impl FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} + +/// A mutable iterator over the values of a `HashMap` in arbitrary order. +/// The iterator element type is `&'a mut V`. +/// +/// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`values_mut`]: struct.HashMap.html#method.values_mut +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let mut map: HashMap<_, _> = [(1, "One".to_owned()), (2, "Two".into())].into(); +/// +/// let mut values = map.values_mut(); +/// values.next().map(|v| v.push_str(" Mississippi")); +/// values.next().map(|v| v.push_str(" Mississippi")); +/// +/// // It is fused iterator +/// assert_eq!(values.next(), None); +/// assert_eq!(values.next(), None); +/// +/// assert_eq!(map.get(&1).unwrap(), &"One Mississippi".to_owned()); +/// assert_eq!(map.get(&2).unwrap(), &"Two Mississippi".to_owned()); +/// ``` +pub struct ValuesMut<'a, K, V> { + inner: IterMut<'a, K, V>, +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This `enum` is constructed from the [`entry`] method on [`HashMap`]. 
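To contrast the two removal iterators documented in this hunk, `extract_if` versus `drain`, a minimal sketch (illustrative only, assuming default features):

```
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x * 10)).collect();

    // `extract_if` removes only the entries the predicate selects and leaves
    // the rest in place; the iterator must be consumed or dropped for the
    // removals to take effect.
    let evens: Vec<_> = map.extract_if(|k, _v| k % 2 == 0).collect();
    assert_eq!(evens.len(), 4);
    assert_eq!(map.len(), 4);

    // `drain` empties the map but keeps its allocation for reuse.
    let drained: Vec<_> = map.drain().collect();
    assert_eq!(drained.len(), 4);
    assert!(map.is_empty());
}
```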
+/// +/// [`HashMap`]: struct.HashMap.html +/// [`entry`]: struct.HashMap.html#method.entry +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{Entry, HashMap, OccupiedEntry}; +/// +/// let mut map = HashMap::new(); +/// map.extend([("a", 10), ("b", 20), ("c", 30)]); +/// assert_eq!(map.len(), 3); +/// +/// // Existing key (insert) +/// let entry: Entry<_, _, _> = map.entry("a"); +/// let _raw_o: OccupiedEntry<_, _, _> = entry.insert(1); +/// assert_eq!(map.len(), 3); +/// // Nonexistent key (insert) +/// map.entry("d").insert(4); +/// +/// // Existing key (or_insert) +/// let v = map.entry("b").or_insert(2); +/// assert_eq!(std::mem::replace(v, 2), 20); +/// // Nonexistent key (or_insert) +/// map.entry("e").or_insert(5); +/// +/// // Existing key (or_insert_with) +/// let v = map.entry("c").or_insert_with(|| 3); +/// assert_eq!(std::mem::replace(v, 3), 30); +/// // Nonexistent key (or_insert_with) +/// map.entry("f").or_insert_with(|| 6); +/// +/// println!("Our HashMap: {:?}", map); +/// +/// let mut vec: Vec<_> = map.iter().map(|(&k, &v)| (k, v)).collect(); +/// // The `Iter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3), ("d", 4), ("e", 5), ("f", 6)]); +/// ``` +pub enum Entry<'a, K, V, S, A = Global> +where + A: Allocator, +{ + /// An occupied entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{Entry, HashMap}; + /// let mut map: HashMap<_, _> = [("a", 100), ("b", 200)].into(); + /// + /// match map.entry("a") { + /// Entry::Vacant(_) => unreachable!(), + /// Entry::Occupied(_) => { } + /// } + /// ``` + Occupied(OccupiedEntry<'a, K, V, S, A>), + + /// A vacant entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{Entry, HashMap}; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// + /// match map.entry("a") { + /// Entry::Occupied(_) => unreachable!(), + /// Entry::Vacant(_) => { } + /// } + /// ``` + Vacant(VacantEntry<'a, K, V, S, A>), +} + +impl Debug for Entry<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), + } + } +} + +/// A view into an occupied entry in a [`HashMap`]. +/// It is part of the [`Entry`] and [`EntryRef`] enums. 
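A compact sketch of the `Entry` enum in use (not part of the patch): one lookup decides between the occupied and vacant arms, so the key is hashed only once.

```
use hashbrown::hash_map::{Entry, HashMap};

fn main() {
    let mut map: HashMap<&str, u32> = HashMap::new();
    map.insert("a", 1);

    // Update in place if the key exists, otherwise insert a default.
    match map.entry("a") {
        Entry::Occupied(mut o) => {
            *o.get_mut() += 10;
        }
        Entry::Vacant(v) => {
            v.insert(0);
        }
    }
    assert_eq!(map["a"], 11);
}
```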
+/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{Entry, HashMap, OccupiedEntry}; +/// +/// let mut map = HashMap::new(); +/// map.extend([("a", 10), ("b", 20), ("c", 30)]); +/// +/// let _entry_o: OccupiedEntry<_, _, _> = map.entry("a").insert(100); +/// assert_eq!(map.len(), 3); +/// +/// // Existing key (insert and update) +/// match map.entry("a") { +/// Entry::Vacant(_) => unreachable!(), +/// Entry::Occupied(mut view) => { +/// assert_eq!(view.get(), &100); +/// let v = view.get_mut(); +/// *v *= 10; +/// assert_eq!(view.insert(1111), 1000); +/// } +/// } +/// +/// assert_eq!(map[&"a"], 1111); +/// assert_eq!(map.len(), 3); +/// +/// // Existing key (take) +/// match map.entry("c") { +/// Entry::Vacant(_) => unreachable!(), +/// Entry::Occupied(view) => { +/// assert_eq!(view.remove_entry(), ("c", 30)); +/// } +/// } +/// assert_eq!(map.get(&"c"), None); +/// assert_eq!(map.len(), 2); +/// ``` +pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { + hash: u64, + elem: Bucket<(K, V)>, + table: &'a mut HashMap, +} + +unsafe impl Send for OccupiedEntry<'_, K, V, S, A> +where + K: Send, + V: Send, + S: Send, + A: Send + Allocator, +{ +} +unsafe impl Sync for OccupiedEntry<'_, K, V, S, A> +where + K: Sync, + V: Sync, + S: Sync, + A: Sync + Allocator, +{ +} + +impl Debug for OccupiedEntry<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +/// A view into a vacant entry in a `HashMap`. +/// It is part of the [`Entry`] enum. +/// +/// [`Entry`]: enum.Entry.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{Entry, HashMap, VacantEntry}; +/// +/// let mut map = HashMap::<&str, i32>::new(); +/// +/// let entry_v: VacantEntry<_, _, _> = match map.entry("a") { +/// Entry::Vacant(view) => view, +/// Entry::Occupied(_) => unreachable!(), +/// }; +/// entry_v.insert(10); +/// assert!(map[&"a"] == 10 && map.len() == 1); +/// +/// // Nonexistent key (insert and update) +/// match map.entry("b") { +/// Entry::Occupied(_) => unreachable!(), +/// Entry::Vacant(view) => { +/// let value = view.insert(2); +/// assert_eq!(*value, 2); +/// *value = 20; +/// } +/// } +/// assert!(map[&"b"] == 20 && map.len() == 2); +/// ``` +pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { + hash: u64, + key: K, + table: &'a mut HashMap, +} + +impl Debug for VacantEntry<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("VacantEntry").field(self.key()).finish() + } +} + +/// A view into a single entry in a map, which may either be vacant or occupied, +/// with any borrowed form of the map's key type. +/// +/// +/// This `enum` is constructed from the [`entry_ref`] method on [`HashMap`]. +/// +/// [`Hash`] and [`Eq`] on the borrowed form of the map's key type *must* match those +/// for the key type. It also require that key may be constructed from the borrowed +/// form through the [`From`] trait. 
+/// +/// [`HashMap`]: struct.HashMap.html +/// [`entry_ref`]: struct.HashMap.html#method.entry_ref +/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html +/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html +/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{EntryRef, HashMap, OccupiedEntry}; +/// +/// let mut map = HashMap::new(); +/// map.extend([("a".to_owned(), 10), ("b".into(), 20), ("c".into(), 30)]); +/// assert_eq!(map.len(), 3); +/// +/// // Existing key (insert) +/// let key = String::from("a"); +/// let entry: EntryRef<_, _, _, _> = map.entry_ref(&key); +/// let _raw_o: OccupiedEntry<_, _, _, _> = entry.insert(1); +/// assert_eq!(map.len(), 3); +/// // Nonexistent key (insert) +/// map.entry_ref("d").insert(4); +/// +/// // Existing key (or_insert) +/// let v = map.entry_ref("b").or_insert(2); +/// assert_eq!(std::mem::replace(v, 2), 20); +/// // Nonexistent key (or_insert) +/// map.entry_ref("e").or_insert(5); +/// +/// // Existing key (or_insert_with) +/// let v = map.entry_ref("c").or_insert_with(|| 3); +/// assert_eq!(std::mem::replace(v, 3), 30); +/// // Nonexistent key (or_insert_with) +/// map.entry_ref("f").or_insert_with(|| 6); +/// +/// println!("Our HashMap: {:?}", map); +/// +/// for (key, value) in ["a", "b", "c", "d", "e", "f"].into_iter().zip(1..=6) { +/// assert_eq!(map[key], value) +/// } +/// assert_eq!(map.len(), 6); +/// ``` +pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global> +where + A: Allocator, +{ + /// An occupied entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{EntryRef, HashMap}; + /// let mut map: HashMap<_, _> = [("a".to_owned(), 100), ("b".into(), 200)].into(); + /// + /// match map.entry_ref("a") { + /// EntryRef::Vacant(_) => unreachable!(), + /// EntryRef::Occupied(_) => { } + /// } + /// ``` + Occupied(OccupiedEntry<'a, K, V, S, A>), + + /// A vacant entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{EntryRef, HashMap}; + /// let mut map: HashMap = HashMap::new(); + /// + /// match map.entry_ref("a") { + /// EntryRef::Occupied(_) => unreachable!(), + /// EntryRef::Vacant(_) => { } + /// } + /// ``` + Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>), +} + +impl Debug for EntryRef<'_, '_, K, Q, V, S, A> +where + K: Debug + Borrow, + Q: Debug + ?Sized, + V: Debug, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + EntryRef::Vacant(ref v) => f.debug_tuple("EntryRef").field(v).finish(), + EntryRef::Occupied(ref o) => f.debug_tuple("EntryRef").field(o).finish(), + } + } +} + +/// A view into a vacant entry in a `HashMap`. +/// It is part of the [`EntryRef`] enum. 
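The point of `EntryRef` (documented in this hunk) is that the owned key is only built when an insert actually happens. A minimal sketch, illustrative only:

```
use hashbrown::HashMap;

fn main() {
    let mut counts: HashMap<String, u32> = HashMap::new();

    // `entry_ref` takes a borrowed `&str`; a `String` key is allocated only
    // for the words that are not yet present.
    for word in ["apple", "banana", "apple"] {
        *counts.entry_ref(word).or_insert(0) += 1;
    }

    assert_eq!(counts["apple"], 2);
    assert_eq!(counts["banana"], 1);
}
```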
+/// +/// [`EntryRef`]: enum.EntryRef.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{EntryRef, HashMap, VacantEntryRef}; +/// +/// let mut map = HashMap::::new(); +/// +/// let entry_v: VacantEntryRef<_, _, _, _> = match map.entry_ref("a") { +/// EntryRef::Vacant(view) => view, +/// EntryRef::Occupied(_) => unreachable!(), +/// }; +/// entry_v.insert(10); +/// assert!(map["a"] == 10 && map.len() == 1); +/// +/// // Nonexistent key (insert and update) +/// match map.entry_ref("b") { +/// EntryRef::Occupied(_) => unreachable!(), +/// EntryRef::Vacant(view) => { +/// let value = view.insert(2); +/// assert_eq!(*value, 2); +/// *value = 20; +/// } +/// } +/// assert!(map["b"] == 20 && map.len() == 2); +/// ``` +pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { + hash: u64, + key: &'b Q, + table: &'a mut HashMap, +} + +impl Debug for VacantEntryRef<'_, '_, K, Q, V, S, A> +where + K: Borrow, + Q: Debug + ?Sized, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("VacantEntryRef").field(&self.key()).finish() + } +} + +/// The error returned by [`try_insert`](HashMap::try_insert) when the key already exists. +/// +/// Contains the occupied entry, and the value that was not inserted. +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{HashMap, OccupiedError}; +/// +/// let mut map: HashMap<_, _> = [("a", 10), ("b", 20)].into(); +/// +/// // try_insert method returns mutable reference to the value if keys are vacant, +/// // but if the map did have key present, nothing is updated, and the provided +/// // value is returned inside `Err(_)` variant +/// match map.try_insert("a", 100) { +/// Err(OccupiedError { mut entry, value }) => { +/// assert_eq!(entry.key(), &"a"); +/// assert_eq!(value, 100); +/// assert_eq!(entry.insert(100), 10) +/// } +/// _ => unreachable!(), +/// } +/// assert_eq!(map[&"a"], 100); +/// ``` +pub struct OccupiedError<'a, K, V, S, A: Allocator = Global> { + /// The entry in the map that was already occupied. + pub entry: OccupiedEntry<'a, K, V, S, A>, + /// The value which was not inserted, because the entry was already occupied. + pub value: V, +} + +impl Debug for OccupiedError<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedError") + .field("key", self.entry.key()) + .field("old_value", self.entry.get()) + .field("new_value", &self.value) + .finish() + } +} + +impl fmt::Display for OccupiedError<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "failed to insert {:?}, key {:?} already exists with value {:?}", + self.value, + self.entry.key(), + self.entry.get(), + ) + } +} + +impl<'a, K, V, S, A: Allocator> IntoIterator for &'a HashMap { + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V>; + + /// Creates an iterator over the entries of a `HashMap` in arbitrary order. + /// The iterator element type is `(&'a K, &'a V)`. + /// + /// Return the same `Iter` struct as by the [`iter`] method on [`HashMap`]. 
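A small sketch of `try_insert` and the `OccupiedError` it returns (not part of the vendored source; key and values are arbitrary):

```
use hashbrown::hash_map::{HashMap, OccupiedError};

fn main() {
    let mut map = HashMap::new();

    // The first insertion succeeds and hands back a mutable reference.
    assert_eq!(*map.try_insert("id", 1).unwrap(), 1);

    // A second insertion with the same key fails without touching the map;
    // the rejected value comes back inside the error.
    match map.try_insert("id", 2) {
        Err(OccupiedError { entry, value }) => {
            assert_eq!(*entry.get(), 1);
            assert_eq!(value, 2);
        }
        Ok(_) => unreachable!(),
    }
}
```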
+ /// + /// [`iter`]: struct.HashMap.html#method.iter + /// [`HashMap`]: struct.HashMap.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let map_one: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); + /// let mut map_two = HashMap::new(); + /// + /// for (key, value) in &map_one { + /// println!("Key: {}, Value: {}", key, value); + /// map_two.insert(*key, *value); + /// } + /// + /// assert_eq!(map_one, map_two); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> Iter<'a, K, V> { + self.iter() + } +} + +impl<'a, K, V, S, A: Allocator> IntoIterator for &'a mut HashMap { + type Item = (&'a K, &'a mut V); + type IntoIter = IterMut<'a, K, V>; + + /// Creates an iterator over the entries of a `HashMap` in arbitrary order + /// with mutable references to the values. The iterator element type is + /// `(&'a K, &'a mut V)`. + /// + /// Return the same `IterMut` struct as by the [`iter_mut`] method on + /// [`HashMap`]. + /// + /// [`iter_mut`]: struct.HashMap.html#method.iter_mut + /// [`HashMap`]: struct.HashMap.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<_, _> = [("a", 1), ("b", 2), ("c", 3)].into(); + /// + /// for (key, value) in &mut map { + /// println!("Key: {}, Value: {}", key, value); + /// *value *= 2; + /// } + /// + /// let mut vec = map.iter().collect::>(); + /// // The `Iter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [(&"a", &2), (&"b", &4), (&"c", &6)]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> IterMut<'a, K, V> { + self.iter_mut() + } +} + +impl IntoIterator for HashMap { + type Item = (K, V); + type IntoIter = IntoIter; + + /// Creates a consuming iterator, that is, one that moves each key-value + /// pair out of the map in arbitrary order. The map cannot be used after + /// calling this. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let map: HashMap<_, _> = [("a", 1), ("b", 2), ("c", 3)].into(); + /// + /// // Not possible with .iter() + /// let mut vec: Vec<(&str, i32)> = map.into_iter().collect(); + /// // The `IntoIter` iterator produces items in arbitrary order, so + /// // the items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3)]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> IntoIter { + IntoIter { + inner: self.table.into_iter(), + } + } +} + +impl Default for Iter<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + inner: Default::default(), + marker: PhantomData, + } + } +} +impl<'a, K, V> Iterator for Iter<'a, K, V> { + type Item = (&'a K, &'a V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<(&'a K, &'a V)> { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.inner.next() { + Some(x) => unsafe { + let r = x.as_ref(); + Some((&r.0, &r.1)) + }, + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, x| unsafe { + let (k, v) = x.as_ref(); + f(acc, (k, v)) + }) + } +} +impl ExactSizeIterator for Iter<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} + +impl FusedIterator for Iter<'_, K, V> {} + +impl Default for IterMut<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + inner: Default::default(), + marker: PhantomData, + } + } +} +impl<'a, K, V> Iterator for IterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<(&'a K, &'a mut V)> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.inner.next() { + Some(x) => unsafe { + let r = x.as_mut(); + Some((&r.0, &mut r.1)) + }, + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, x| unsafe { + let (k, v) = x.as_mut(); + f(acc, (k, v)) + }) + } +} +impl ExactSizeIterator for IterMut<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for IterMut<'_, K, V> {} + +impl fmt::Debug for IterMut<'_, K, V> +where + K: fmt::Debug, + V: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl Default for IntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + inner: Default::default(), + } + } +} +impl Iterator for IntoIter { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<(K, V)> { + self.inner.next() + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, f) + } +} +impl ExactSizeIterator for IntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for IntoIter {} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl Default for Keys<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + inner: Default::default(), + } + } +} +impl<'a, K, V> Iterator for Keys<'a, K, V> { + type Item = &'a K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a K> { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.inner.next() { + Some((k, _)) => Some(k), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (k, _)| f(acc, k)) + } +} +impl ExactSizeIterator for Keys<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for Keys<'_, K, V> {} + +impl Default for Values<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + inner: Default::default(), + } + } +} +impl<'a, K, V> Iterator for Values<'a, K, V> { + type Item = &'a V; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a V> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.inner.next() { + Some((_, v)) => Some(v), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (_, v)| f(acc, v)) + } +} +impl ExactSizeIterator for Values<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for Values<'_, K, V> {} + +impl Default for ValuesMut<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + inner: Default::default(), + } + } +} +impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { + type Item = &'a mut V; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a mut V> { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.inner.next() { + Some((_, v)) => Some(v), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, |acc, (_, v)| f(acc, v)) + } +} +impl ExactSizeIterator for ValuesMut<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for ValuesMut<'_, K, V> {} + +impl fmt::Debug for ValuesMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.inner.iter().map(|(_, val)| val)) + .finish() + } +} + +impl Iterator for Drain<'_, K, V, A> { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<(K, V)> { + self.inner.next() + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.inner.fold(init, f) + } +} +impl ExactSizeIterator for Drain<'_, K, V, A> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for Drain<'_, K, V, A> {} + +impl fmt::Debug for Drain<'_, K, V, A> +where + K: fmt::Debug, + V: fmt::Debug, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl<'a, K, V, S, A: Allocator> Entry<'a, K, V, S, A> { + /// Sets the value of the entry, and returns an `OccupiedEntry`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let entry = map.entry("horseyland").insert(37); + /// + /// assert_eq!(entry.key(), &"horseyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, value: V) -> OccupiedEntry<'a, K, V, S, A> + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(mut entry) => { + entry.insert(value); + entry + } + Entry::Vacant(entry) => entry.insert_entry(value), + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// // nonexistent key + /// map.entry("poneyland").or_insert(3); + /// assert_eq!(map["poneyland"], 3); + /// + /// // existing key + /// *map.entry("poneyland").or_insert(10) *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self, default: V) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns a mutable reference to the value in the entry. 
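The `or_insert` family documented in this hunk combines naturally with `and_modify` (defined later in this file) for counting patterns; a minimal sketch, illustrative only:

```
use hashbrown::HashMap;

fn main() {
    let text = "the quick brown fox jumps over the lazy dog the end";
    let mut counts: HashMap<&str, u32> = HashMap::new();

    // One hash lookup per word: bump the counter if present, insert 1 otherwise.
    for word in text.split_whitespace() {
        counts.entry(word).and_modify(|c| *c += 1).or_insert(1);
    }

    assert_eq!(counts["the"], 3);
    assert_eq!(counts["fox"], 1);
}
```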
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// // nonexistent key + /// map.entry("poneyland").or_insert_with(|| 3); + /// assert_eq!(map["poneyland"], 3); + /// + /// // existing key + /// *map.entry("poneyland").or_insert_with(|| 10) *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with V>(self, default: F) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(default()), + } + } + + /// Ensures a value is in the entry by inserting, if empty, the result of the default function. + /// This method allows for generating key-derived values for insertion by providing the default + /// function a reference to the key that was moved during the `.entry(key)` method call. + /// + /// The reference to the moved key is provided so that cloning or copying the key is + /// unnecessary, unlike with `.or_insert_with(|| ... )`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, usize> = HashMap::new(); + /// + /// // nonexistent key + /// map.entry("poneyland").or_insert_with_key(|key| key.chars().count()); + /// assert_eq!(map["poneyland"], 9); + /// + /// // existing key + /// *map.entry("poneyland").or_insert_with_key(|key| key.chars().count() * 10) *= 2; + /// assert_eq!(map["poneyland"], 18); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with_key V>(self, default: F) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => { + let value = default(entry.key()); + entry.insert(value) + } + } + } + + /// Returns a reference to this entry's key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(3); + /// // existing key + /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); + /// // nonexistent key + /// assert_eq!(map.entry("horseland").key(), &"horseland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + match *self { + Entry::Occupied(ref entry) => entry.key(), + Entry::Vacant(ref entry) => entry.key(), + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_modify(self, f: F) -> Self + where + F: FnOnce(&mut V), + { + match self { + Entry::Occupied(mut entry) => { + f(entry.get_mut()); + Entry::Occupied(entry) + } + Entry::Vacant(entry) => Entry::Vacant(entry), + } + } + + /// Provides shared access to the key and owned access to the value of + /// an occupied entry and allows to replace or remove it based on the + /// value of the returned option. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// let entry = map + /// .entry("poneyland") + /// .and_replace_entry_with(|_k, _v| panic!()); + /// + /// match entry { + /// Entry::Vacant(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// } + /// Entry::Occupied(_) => panic!(), + /// } + /// + /// map.insert("poneyland", 42); + /// + /// let entry = map + /// .entry("poneyland") + /// .and_replace_entry_with(|k, v| { + /// assert_eq!(k, &"poneyland"); + /// assert_eq!(v, 42); + /// Some(v + 1) + /// }); + /// + /// match entry { + /// Entry::Occupied(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// assert_eq!(e.get(), &43); + /// } + /// Entry::Vacant(_) => panic!(), + /// } + /// + /// assert_eq!(map["poneyland"], 43); + /// + /// let entry = map + /// .entry("poneyland") + /// .and_replace_entry_with(|_k, _v| None); + /// + /// match entry { + /// Entry::Vacant(e) => assert_eq!(e.key(), &"poneyland"), + /// Entry::Occupied(_) => panic!(), + /// } + /// + /// assert!(!map.contains_key("poneyland")); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_replace_entry_with(self, f: F) -> Self + where + F: FnOnce(&K, V) -> Option, + { + match self { + Entry::Occupied(entry) => entry.replace_entry_with(f), + Entry::Vacant(_) => self, + } + } +} + +impl<'a, K, V: Default, S, A: Allocator> Entry<'a, K, V, S, A> { + /// Ensures a value is in the entry by inserting the default value if empty, + /// and returns a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, Option> = HashMap::new(); + /// + /// // nonexistent key + /// map.entry("poneyland").or_default(); + /// assert_eq!(map["poneyland"], None); + /// + /// map.insert("horseland", Some(3)); + /// + /// // existing key + /// assert_eq!(map.entry("horseland").or_default(), &mut Some(3)); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_default(self) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(Default::default()), + } + } +} + +impl<'a, K, V, S, A: Allocator> OccupiedEntry<'a, K, V, S, A> { + /// Gets a reference to the key in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{Entry, HashMap}; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// match map.entry("poneyland") { + /// Entry::Vacant(_) => panic!(), + /// Entry::Occupied(entry) => assert_eq!(entry.key(), &"poneyland"), + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + unsafe { &self.elem.as_ref().0 } + } + + /// Take the ownership of the key and value from the map. + /// Keeps the allocated memory for reuse. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// // The map is empty + /// assert!(map.is_empty() && map.capacity() == 0); + /// + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// // We delete the entry from the map. 
+ /// assert_eq!(o.remove_entry(), ("poneyland", 12)); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// // Now map hold none elements + /// assert!(map.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(self) -> (K, V) { + unsafe { self.table.table.remove(self.elem).0 } + } + + /// Gets a reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// match map.entry("poneyland") { + /// Entry::Vacant(_) => panic!(), + /// Entry::Occupied(entry) => assert_eq!(entry.get(), &12), + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &V { + unsafe { &self.elem.as_ref().1 } + } + + /// Gets a mutable reference to the value in the entry. + /// + /// If you need a reference to the `OccupiedEntry` which may outlive the + /// destruction of the `Entry` value, see [`into_mut`]. + /// + /// [`into_mut`]: #method.into_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let Entry::Occupied(mut o) = map.entry("poneyland") { + /// *o.get_mut() += 10; + /// assert_eq!(*o.get(), 22); + /// + /// // We can use the same Entry multiple times. + /// *o.get_mut() += 2; + /// } + /// + /// assert_eq!(map["poneyland"], 24); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self) -> &mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Converts the `OccupiedEntry` into a mutable reference to the value in the entry + /// with a lifetime bound to the map itself. + /// + /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`]. + /// + /// [`get_mut`]: #method.get_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{Entry, HashMap}; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// + /// let value: &mut u32; + /// match map.entry("poneyland") { + /// Entry::Occupied(entry) => value = entry.into_mut(), + /// Entry::Vacant(_) => panic!(), + /// } + /// *value += 10; + /// + /// assert_eq!(map["poneyland"], 22); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_mut(self) -> &'a mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Sets the value of the entry, and returns the entry's old value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(mut o) = map.entry("poneyland") { + /// assert_eq!(o.insert(15), 12); + /// } + /// + /// assert_eq!(map["poneyland"], 15); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + /// Takes the value out of the entry, and returns it. + /// Keeps the allocated memory for reuse. 
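On the `get_mut` versus `into_mut` distinction documented in this hunk: `into_mut` consumes the entry and returns a reference tied to the map borrow itself. A minimal sketch (not part of the patch):

```
use hashbrown::hash_map::{Entry, HashMap};

fn main() {
    let mut map: HashMap<&str, u32> = HashMap::new();
    map.insert("k", 1);

    // `into_mut` outlives the `OccupiedEntry`, so the reference can be
    // returned from the match and used afterwards.
    let v: &mut u32 = match map.entry("k") {
        Entry::Occupied(o) => o.into_mut(),
        Entry::Vacant(v) => v.insert(0),
    };
    *v += 41;

    assert_eq!(map["k"], 42);
}
```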
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// // The map is empty + /// assert!(map.is_empty() && map.capacity() == 0); + /// + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// assert_eq!(o.remove(), 12); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// // Now map hold none elements + /// assert!(map.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(self) -> V { + self.remove_entry().1 + } + + /// Provides shared access to the key and owned access to the value of + /// the entry and allows to replace or remove it based on the + /// value of the returned option. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.insert("poneyland", 42); + /// + /// let entry = match map.entry("poneyland") { + /// Entry::Occupied(e) => { + /// e.replace_entry_with(|k, v| { + /// assert_eq!(k, &"poneyland"); + /// assert_eq!(v, 42); + /// Some(v + 1) + /// }) + /// } + /// Entry::Vacant(_) => panic!(), + /// }; + /// + /// match entry { + /// Entry::Occupied(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// assert_eq!(e.get(), &43); + /// } + /// Entry::Vacant(_) => panic!(), + /// } + /// + /// assert_eq!(map["poneyland"], 43); + /// + /// let entry = match map.entry("poneyland") { + /// Entry::Occupied(e) => e.replace_entry_with(|_k, _v| None), + /// Entry::Vacant(_) => panic!(), + /// }; + /// + /// match entry { + /// Entry::Vacant(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// } + /// Entry::Occupied(_) => panic!(), + /// } + /// + /// assert!(!map.contains_key("poneyland")); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace_entry_with(self, f: F) -> Entry<'a, K, V, S, A> + where + F: FnOnce(&K, V) -> Option, + { + unsafe { + let mut spare_key = None; + + self.table + .table + .replace_bucket_with(self.elem.clone(), |(key, value)| { + if let Some(new_value) = f(&key, value) { + Some((key, new_value)) + } else { + spare_key = Some(key); + None + } + }); + + if let Some(key) = spare_key { + Entry::Vacant(VacantEntry { + hash: self.hash, + key, + table: self.table, + }) + } else { + Entry::Occupied(self) + } + } + } +} + +impl<'a, K, V, S, A: Allocator> VacantEntry<'a, K, V, S, A> { + /// Gets a reference to the key that would be used when inserting a value + /// through the `VacantEntry`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + &self.key + } + + /// Take ownership of the key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{Entry, HashMap}; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// match map.entry("poneyland") { + /// Entry::Occupied(_) => panic!(), + /// Entry::Vacant(v) => assert_eq!(v.into_key(), "poneyland"), + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_key(self) -> K { + self.key + } + + /// Sets the value of the entry with the [`VacantEntry`]'s key, + /// and returns a mutable reference to it. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let Entry::Vacant(o) = map.entry("poneyland") { + /// o.insert(37); + /// } + /// assert_eq!(map["poneyland"], 37); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, value: V) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + let table = &mut self.table.table; + let entry = table.insert_entry( + self.hash, + (self.key, value), + make_hasher::<_, V, S>(&self.table.hash_builder), + ); + &mut entry.1 + } + + /// Sets the value of the entry with the [`VacantEntry`]'s key, + /// and returns an [`OccupiedEntry`]. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let Entry::Vacant(v) = map.entry("poneyland") { + /// let o = v.insert_entry(37); + /// assert_eq!(o.get(), &37); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V, S, A> + where + K: Hash, + S: BuildHasher, + { + let elem = self.table.table.insert( + self.hash, + (self.key, value), + make_hasher::<_, V, S>(&self.table.hash_builder), + ); + OccupiedEntry { + hash: self.hash, + elem, + table: self.table, + } + } +} + +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { + /// Sets the value of the entry, and returns an `OccupiedEntry`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::new(); + /// let entry = map.entry_ref("horseyland").insert(37); + /// + /// assert_eq!(entry.key(), "horseyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, value: V) -> OccupiedEntry<'a, K, V, S, A> + where + K: Hash + From<&'b Q>, + S: BuildHasher, + { + match self { + EntryRef::Occupied(mut entry) => { + entry.insert(value); + entry + } + EntryRef::Vacant(entry) => entry.insert_entry(value), + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::new(); + /// + /// // nonexistent key + /// map.entry_ref("poneyland").or_insert(3); + /// assert_eq!(map["poneyland"], 3); + /// + /// // existing key + /// *map.entry_ref("poneyland").or_insert(10) *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self, default: V) -> &'a mut V + where + K: Hash + From<&'b Q>, + S: BuildHasher, + { + match self { + EntryRef::Occupied(entry) => entry.into_mut(), + EntryRef::Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns a mutable reference to the value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::new(); + /// + /// // nonexistent key + /// map.entry_ref("poneyland").or_insert_with(|| 3); + /// assert_eq!(map["poneyland"], 3); + /// + /// // existing key + /// *map.entry_ref("poneyland").or_insert_with(|| 10) *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with V>(self, default: F) -> &'a mut V + where + K: Hash + From<&'b Q>, + S: BuildHasher, + { + match self { + EntryRef::Occupied(entry) => entry.into_mut(), + EntryRef::Vacant(entry) => entry.insert(default()), + } + } + + /// Ensures a value is in the entry by inserting, if empty, the result of the default function. + /// This method allows for generating key-derived values for insertion by providing the default + /// function an access to the borrower form of the key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::new(); + /// + /// // nonexistent key + /// map.entry_ref("poneyland").or_insert_with_key(|key| key.chars().count()); + /// assert_eq!(map["poneyland"], 9); + /// + /// // existing key + /// *map.entry_ref("poneyland").or_insert_with_key(|key| key.chars().count() * 10) *= 2; + /// assert_eq!(map["poneyland"], 18); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with_key V>(self, default: F) -> &'a mut V + where + K: Hash + Borrow + From<&'b Q>, + S: BuildHasher, + { + match self { + EntryRef::Occupied(entry) => entry.into_mut(), + EntryRef::Vacant(entry) => { + let value = default(entry.key); + entry.insert(value) + } + } + } + + /// Returns a reference to this entry's key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::new(); + /// map.entry_ref("poneyland").or_insert(3); + /// // existing key + /// assert_eq!(map.entry_ref("poneyland").key(), "poneyland"); + /// // nonexistent key + /// assert_eq!(map.entry_ref("horseland").key(), "horseland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &Q + where + K: Borrow, + { + match *self { + EntryRef::Occupied(ref entry) => entry.key().borrow(), + EntryRef::Vacant(ref entry) => entry.key(), + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::new(); + /// + /// map.entry_ref("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.entry_ref("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_modify(self, f: F) -> Self + where + F: FnOnce(&mut V), + { + match self { + EntryRef::Occupied(mut entry) => { + f(entry.get_mut()); + EntryRef::Occupied(entry) + } + EntryRef::Vacant(entry) => EntryRef::Vacant(entry), + } + } +} + +impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { + /// Ensures a value is in the entry by inserting the default value if empty, + /// and returns a mutable reference to the value in the entry. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, Option<u32>> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry_ref("poneyland").or_default();
+    /// assert_eq!(map["poneyland"], None);
+    ///
+    /// map.insert("horseland".to_string(), Some(3));
+    ///
+    /// // existing key
+    /// assert_eq!(map.entry_ref("horseland").or_default(), &mut Some(3));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_default(self) -> &'a mut V
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        match self {
+            EntryRef::Occupied(entry) => entry.into_mut(),
+            EntryRef::Vacant(entry) => entry.insert(Default::default()),
+        }
+    }
+}
+
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> VacantEntryRef<'a, 'b, K, Q, V, S, A> {
+    /// Gets a reference to the key that would be used when inserting a value
+    /// through the `VacantEntryRef`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// let key: &str = "poneyland";
+    /// assert_eq!(map.entry_ref(key).key(), "poneyland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &'b Q {
+        self.key
+    }
+
+    /// Sets the value of the entry with the `VacantEntryRef`'s key,
+    /// and returns a mutable reference to it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// let key: &str = "poneyland";
+    ///
+    /// if let EntryRef::Vacant(o) = map.entry_ref(key) {
+    ///     o.insert(37);
+    /// }
+    /// assert_eq!(map["poneyland"], 37);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self, value: V) -> &'a mut V
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        let table = &mut self.table.table;
+        let entry = table.insert_entry(
+            self.hash,
+            (self.key.into(), value),
+            make_hasher::<_, V, S>(&self.table.hash_builder),
+        );
+        &mut entry.1
+    }
+
+    /// Sets the value of the entry with the [`VacantEntryRef`]'s key,
+    /// and returns an [`OccupiedEntry`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// if let EntryRef::Vacant(v) = map.entry_ref("poneyland") {
+    ///     let o = v.insert_entry(37);
+    ///     assert_eq!(o.get(), &37);
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V, S, A>
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        let elem = self.table.table.insert(
+            self.hash,
+            (self.key.into(), value),
+            make_hasher::<_, V, S>(&self.table.hash_builder),
+        );
+        OccupiedEntry {
+            hash: self.hash,
+            elem,
+            table: self.table,
+        }
+    }
+}
+
+impl<K, V, S, A> FromIterator<(K, V)> for HashMap<K, V, S, A>
+where
+    K: Eq + Hash,
+    S: BuildHasher + Default,
+    A: Default + Allocator,
+{
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
+        let iter = iter.into_iter();
+        let mut map =
+            Self::with_capacity_and_hasher_in(iter.size_hint().0, S::default(), A::default());
+        iter.for_each(|(k, v)| {
+            map.insert(k, v);
+        });
+        map
+    }
+}
+
+/// Inserts all new key-values from the iterator and replaces values with existing
+/// keys with new values returned from the iterator.
+impl<K, V, S, A> Extend<(K, V)> for HashMap<K, V, S, A>
+where
+    K: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator,
+{
+    /// Inserts all new key-values from the iterator to existing `HashMap`.
+ /// Replace values with existing keys with new values returned from the iterator. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, 100); + /// + /// let some_iter = [(1, 1), (2, 2)].into_iter(); + /// map.extend(some_iter); + /// // Replace values with existing keys with new values returned from the iterator. + /// // So that the map.get(&1) doesn't return Some(&100). + /// assert_eq!(map.get(&1), Some(&1)); + /// + /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)]; + /// map.extend(some_vec); + /// + /// let some_arr = [(5, 5), (6, 6)]; + /// map.extend(some_arr); + /// let old_map_len = map.len(); + /// + /// // You can also extend from another HashMap + /// let mut new_map = HashMap::new(); + /// new_map.extend(map); + /// assert_eq!(new_map.len(), old_map_len); + /// + /// let mut vec: Vec<_> = new_map.into_iter().collect(); + /// // The `IntoIter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: T) { + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. + // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case. + let iter = iter.into_iter(); + let reserve = if self.is_empty() { + iter.size_hint().0 + } else { + (iter.size_hint().0 + 1) / 2 + }; + self.reserve(reserve); + iter.for_each(move |(k, v)| { + self.insert(k, v); + }); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, (k, v): (K, V)) { + self.insert(k, v); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. + // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case. + let reserve = if self.is_empty() { + additional + } else { + (additional + 1) / 2 + }; + self.reserve(reserve); + } +} + +/// Inserts all new key-values from the iterator and replaces values with existing +/// keys with new values returned from the iterator. +impl<'a, K, V, S, A> Extend<(&'a K, &'a V)> for HashMap +where + K: Eq + Hash + Copy, + V: Copy, + S: BuildHasher, + A: Allocator, +{ + /// Inserts all new key-values from the iterator to existing `HashMap`. + /// Replace values with existing keys with new values returned from the iterator. + /// The keys and values must implement [`Copy`] trait. + /// + /// [`Copy`]: https://doc.rust-lang.org/core/marker/trait.Copy.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, 100); + /// + /// let arr = [(1, 1), (2, 2)]; + /// let some_iter = arr.iter().map(|(k, v)| (k, v)); + /// map.extend(some_iter); + /// // Replace values with existing keys with new values returned from the iterator. + /// // So that the map.get(&1) doesn't return Some(&100). 
+ /// assert_eq!(map.get(&1), Some(&1)); + /// + /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)]; + /// map.extend(some_vec.iter().map(|(k, v)| (k, v))); + /// + /// let some_arr = [(5, 5), (6, 6)]; + /// map.extend(some_arr.iter().map(|(k, v)| (k, v))); + /// + /// // You can also extend from another HashMap + /// let mut new_map = HashMap::new(); + /// new_map.extend(&map); + /// assert_eq!(new_map, map); + /// + /// let mut vec: Vec<_> = new_map.into_iter().collect(); + /// // The `IntoIter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: T) { + self.extend(iter.into_iter().map(|(&key, &value)| (key, value))); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, (k, v): (&'a K, &'a V)) { + self.insert(*k, *v); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + Extend::<(K, V)>::extend_reserve(self, additional); + } +} + +/// Inserts all new key-values from the iterator and replaces values with existing +/// keys with new values returned from the iterator. +impl<'a, K, V, S, A> Extend<&'a (K, V)> for HashMap +where + K: Eq + Hash + Copy, + V: Copy, + S: BuildHasher, + A: Allocator, +{ + /// Inserts all new key-values from the iterator to existing `HashMap`. + /// Replace values with existing keys with new values returned from the iterator. + /// The keys and values must implement [`Copy`] trait. + /// + /// [`Copy`]: https://doc.rust-lang.org/core/marker/trait.Copy.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, 100); + /// + /// let arr = [(1, 1), (2, 2)]; + /// let some_iter = arr.iter(); + /// map.extend(some_iter); + /// // Replace values with existing keys with new values returned from the iterator. + /// // So that the map.get(&1) doesn't return Some(&100). + /// assert_eq!(map.get(&1), Some(&1)); + /// + /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)]; + /// map.extend(&some_vec); + /// + /// let some_arr = [(5, 5), (6, 6)]; + /// map.extend(&some_arr); + /// + /// let mut vec: Vec<_> = map.into_iter().collect(); + /// // The `IntoIter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. 
+ /// vec.sort_unstable(); + /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: T) { + self.extend(iter.into_iter().map(|&(key, value)| (key, value))); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, &(k, v): &'a (K, V)) { + self.insert(k, v); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + Extend::<(K, V)>::extend_reserve(self, additional); + } +} + +#[allow(dead_code)] +fn assert_covariance() { + fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { + v + } + fn map_val<'new>(v: HashMap) -> HashMap { + v + } + fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> { + v + } + fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { + v + } + fn into_iter_key<'new, A: Allocator>( + v: IntoIter<&'static str, u8, A>, + ) -> IntoIter<&'new str, u8, A> { + v + } + fn into_iter_val<'new, A: Allocator>( + v: IntoIter, + ) -> IntoIter { + v + } + fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> { + v + } + fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> { + v + } + fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> { + v + } + fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> { + v + } + fn drain<'new>( + d: Drain<'static, &'static str, &'static str>, + ) -> Drain<'new, &'new str, &'new str> { + d + } +} + +#[cfg(test)] +mod test_map { + use super::DefaultHashBuilder; + use super::Entry::{Occupied, Vacant}; + use super::EntryRef; + use super::HashMap; + use alloc::string::{String, ToString}; + use alloc::sync::Arc; + use allocator_api2::alloc::{AllocError, Allocator, Global}; + use core::alloc::Layout; + use core::ptr::NonNull; + use core::sync::atomic::{AtomicI8, Ordering}; + use rand::{rngs::SmallRng, Rng, SeedableRng}; + use std::borrow::ToOwned; + use std::cell::RefCell; + use std::vec::Vec; + + #[test] + fn test_zero_capacities() { + type HM = HashMap; + + let m = HM::new(); + assert_eq!(m.capacity(), 0); + + let m = HM::default(); + assert_eq!(m.capacity(), 0); + + let m = HM::with_hasher(DefaultHashBuilder::default()); + assert_eq!(m.capacity(), 0); + + let m = HM::with_capacity(0); + assert_eq!(m.capacity(), 0); + + let m = HM::with_capacity_and_hasher(0, DefaultHashBuilder::default()); + assert_eq!(m.capacity(), 0); + + let mut m = HM::new(); + m.insert(1, 1); + m.insert(2, 2); + m.remove(&1); + m.remove(&2); + m.shrink_to_fit(); + assert_eq!(m.capacity(), 0); + + let mut m = HM::new(); + m.reserve(0); + assert_eq!(m.capacity(), 0); + } + + #[test] + fn test_create_capacity_zero() { + let mut m = HashMap::with_capacity(0); + + assert!(m.insert(1, 1).is_none()); + + assert!(m.contains_key(&1)); + assert!(!m.contains_key(&0)); + } + + #[test] + fn test_insert() { + let mut m = HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&2).unwrap(), 4); + } + + #[test] + fn test_clone() { + let mut m = HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + #[allow(clippy::redundant_clone)] + let m2 = m.clone(); + 
assert_eq!(*m2.get(&1).unwrap(), 2); + assert_eq!(*m2.get(&2).unwrap(), 4); + assert_eq!(m2.len(), 2); + } + + #[test] + fn test_clone_from() { + let mut m = HashMap::new(); + let mut m2 = HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + m2.clone_from(&m); + assert_eq!(*m2.get(&1).unwrap(), 2); + assert_eq!(*m2.get(&2).unwrap(), 4); + assert_eq!(m2.len(), 2); + } + + thread_local! { static DROP_VECTOR: RefCell> = const { RefCell::new(Vec::new()) } } + + #[derive(Hash, PartialEq, Eq)] + struct Droppable { + k: usize, + } + + impl Droppable { + fn new(k: usize) -> Droppable { + DROP_VECTOR.with(|slot| { + slot.borrow_mut()[k] += 1; + }); + + Droppable { k } + } + } + + impl Drop for Droppable { + fn drop(&mut self) { + DROP_VECTOR.with(|slot| { + slot.borrow_mut()[self.k] -= 1; + }); + } + } + + impl Clone for Droppable { + fn clone(&self) -> Self { + Droppable::new(self.k) + } + } + + #[test] + fn test_drops() { + DROP_VECTOR.with(|slot| { + *slot.borrow_mut() = vec![0; 200]; + }); + + { + let mut m = HashMap::new(); + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + + for i in 0..100 { + let d1 = Droppable::new(i); + let d2 = Droppable::new(i + 100); + m.insert(d1, d2); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 1); + } + }); + + for i in 0..50 { + let k = Droppable::new(i); + let v = m.remove(&k); + + assert!(v.is_some()); + + DROP_VECTOR.with(|v| { + assert_eq!(v.borrow()[i], 1); + assert_eq!(v.borrow()[i + 100], 1); + }); + } + + DROP_VECTOR.with(|v| { + for i in 0..50 { + assert_eq!(v.borrow()[i], 0); + assert_eq!(v.borrow()[i + 100], 0); + } + + for i in 50..100 { + assert_eq!(v.borrow()[i], 1); + assert_eq!(v.borrow()[i + 100], 1); + } + }); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + } + + #[test] + fn test_into_iter_drops() { + DROP_VECTOR.with(|v| { + *v.borrow_mut() = vec![0; 200]; + }); + + let hm = { + let mut hm = HashMap::new(); + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + + for i in 0..100 { + let d1 = Droppable::new(i); + let d2 = Droppable::new(i + 100); + hm.insert(d1, d2); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 1); + } + }); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. 
+ drop(hm.clone()); + + { + let mut half = hm.into_iter().take(50); + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 1); + } + }); + + for _ in half.by_ref() {} + + DROP_VECTOR.with(|v| { + let nk = (0..100).filter(|&i| v.borrow()[i] == 1).count(); + + let nv = (0..100).filter(|&i| v.borrow()[i + 100] == 1).count(); + + assert_eq!(nk, 50); + assert_eq!(nv, 50); + }); + }; + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + } + + #[test] + fn test_empty_remove() { + let mut m: HashMap = HashMap::new(); + assert_eq!(m.remove(&0), None); + } + + #[test] + fn test_empty_entry() { + let mut m: HashMap = HashMap::new(); + match m.entry(0) { + Occupied(_) => panic!(), + Vacant(_) => {} + } + assert!(*m.entry(0).or_insert(true)); + assert_eq!(m.len(), 1); + } + + #[test] + fn test_empty_entry_ref() { + let mut m: HashMap = HashMap::new(); + match m.entry_ref("poneyland") { + EntryRef::Occupied(_) => panic!(), + EntryRef::Vacant(_) => {} + } + assert!(*m.entry_ref("poneyland").or_insert(true)); + assert_eq!(m.len(), 1); + } + + #[test] + fn test_empty_iter() { + let mut m: HashMap = HashMap::new(); + assert_eq!(m.drain().next(), None); + assert_eq!(m.keys().next(), None); + assert_eq!(m.values().next(), None); + assert_eq!(m.values_mut().next(), None); + assert_eq!(m.iter().next(), None); + assert_eq!(m.iter_mut().next(), None); + assert_eq!(m.len(), 0); + assert!(m.is_empty()); + assert_eq!(m.into_iter().next(), None); + } + + #[test] + #[cfg_attr(miri, ignore)] // FIXME: takes too long + fn test_lots_of_insertions() { + let mut m = HashMap::new(); + + // Try this a few times to make sure we never screw up the hashmap's + // internal state. + for _ in 0..10 { + assert!(m.is_empty()); + + for i in 1..1001 { + assert!(m.insert(i, i).is_none()); + + for j in 1..=i { + let r = m.get(&j); + assert_eq!(r, Some(&j)); + } + + for j in i + 1..1001 { + let r = m.get(&j); + assert_eq!(r, None); + } + } + + for i in 1001..2001 { + assert!(!m.contains_key(&i)); + } + + // remove forwards + for i in 1..1001 { + assert!(m.remove(&i).is_some()); + + for j in 1..=i { + assert!(!m.contains_key(&j)); + } + + for j in i + 1..1001 { + assert!(m.contains_key(&j)); + } + } + + for i in 1..1001 { + assert!(!m.contains_key(&i)); + } + + for i in 1..1001 { + assert!(m.insert(i, i).is_none()); + } + + // remove backwards + for i in (1..1001).rev() { + assert!(m.remove(&i).is_some()); + + for j in i..1001 { + assert!(!m.contains_key(&j)); + } + + for j in 1..i { + assert!(m.contains_key(&j)); + } + } + } + } + + #[test] + fn test_find_mut() { + let mut m = HashMap::new(); + assert!(m.insert(1, 12).is_none()); + assert!(m.insert(2, 8).is_none()); + assert!(m.insert(5, 14).is_none()); + let new = 100; + match m.get_mut(&5) { + None => panic!(), + Some(x) => *x = new, + } + assert_eq!(m.get(&5), Some(&new)); + } + + #[test] + fn test_insert_overwrite() { + let mut m = HashMap::new(); + assert!(m.insert(1, 2).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert!(m.insert(1, 3).is_some()); + assert_eq!(*m.get(&1).unwrap(), 3); + } + + #[test] + fn test_insert_conflicts() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + assert!(m.insert(5, 3).is_none()); + assert!(m.insert(9, 4).is_none()); + assert_eq!(*m.get(&9).unwrap(), 4); + assert_eq!(*m.get(&5).unwrap(), 3); + assert_eq!(*m.get(&1).unwrap(), 2); + } + + #[test] + fn test_conflict_remove() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + 
assert_eq!(*m.get(&1).unwrap(), 2); + assert!(m.insert(5, 3).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&5).unwrap(), 3); + assert!(m.insert(9, 4).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&5).unwrap(), 3); + assert_eq!(*m.get(&9).unwrap(), 4); + assert!(m.remove(&1).is_some()); + assert_eq!(*m.get(&9).unwrap(), 4); + assert_eq!(*m.get(&5).unwrap(), 3); + } + + #[test] + fn test_insert_unique_unchecked() { + let mut map = HashMap::new(); + let (k1, v1) = unsafe { map.insert_unique_unchecked(10, 11) }; + assert_eq!((&10, &mut 11), (k1, v1)); + let (k2, v2) = unsafe { map.insert_unique_unchecked(20, 21) }; + assert_eq!((&20, &mut 21), (k2, v2)); + assert_eq!(Some(&11), map.get(&10)); + assert_eq!(Some(&21), map.get(&20)); + assert_eq!(None, map.get(&30)); + } + + #[test] + fn test_is_empty() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + assert!(!m.is_empty()); + assert!(m.remove(&1).is_some()); + assert!(m.is_empty()); + } + + #[test] + fn test_remove() { + let mut m = HashMap::new(); + m.insert(1, 2); + assert_eq!(m.remove(&1), Some(2)); + assert_eq!(m.remove(&1), None); + } + + #[test] + fn test_remove_entry() { + let mut m = HashMap::new(); + m.insert(1, 2); + assert_eq!(m.remove_entry(&1), Some((1, 2))); + assert_eq!(m.remove(&1), None); + } + + #[test] + fn test_iterate() { + let mut m = HashMap::with_capacity(4); + for i in 0..32 { + assert!(m.insert(i, i * 2).is_none()); + } + assert_eq!(m.len(), 32); + + let mut observed: u32 = 0; + + for (k, v) in &m { + assert_eq!(*v, *k * 2); + observed |= 1 << *k; + } + assert_eq!(observed, 0xFFFF_FFFF); + } + + #[test] + fn test_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let keys: Vec<_> = map.keys().copied().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn test_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let values: Vec<_> = map.values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn test_values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + let mut map: HashMap<_, _> = vec.into_iter().collect(); + for value in map.values_mut() { + *value *= 2; + } + let values: Vec<_> = map.values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); + } + + #[test] + fn test_into_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let keys: Vec<_> = map.into_keys().collect(); + + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn test_into_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let values: Vec<_> = map.into_values().collect(); + + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn test_find() { + let mut m = HashMap::new(); + assert!(m.get(&1).is_none()); + m.insert(1, 2); + match m.get(&1) { + None => panic!(), + Some(v) => assert_eq!(*v, 2), + } + } + + #[test] + fn test_eq() { + let mut 
m1 = HashMap::new(); + m1.insert(1, 2); + m1.insert(2, 3); + m1.insert(3, 4); + + let mut m2 = HashMap::new(); + m2.insert(1, 2); + m2.insert(2, 3); + + assert!(m1 != m2); + + m2.insert(3, 4); + + assert_eq!(m1, m2); + } + + #[test] + fn test_show() { + let mut map = HashMap::new(); + let empty: HashMap = HashMap::new(); + + map.insert(1, 2); + map.insert(3, 4); + + let map_str = format!("{map:?}"); + + assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}"); + assert_eq!(format!("{empty:?}"), "{}"); + } + + #[test] + fn test_expand() { + let mut m = HashMap::new(); + + assert_eq!(m.len(), 0); + assert!(m.is_empty()); + + let mut i = 0; + let old_raw_cap = m.raw_capacity(); + while old_raw_cap == m.raw_capacity() { + m.insert(i, i); + i += 1; + } + + assert_eq!(m.len(), i); + assert!(!m.is_empty()); + } + + #[test] + fn test_behavior_resize_policy() { + let mut m = HashMap::new(); + + assert_eq!(m.len(), 0); + assert_eq!(m.raw_capacity(), 1); + assert!(m.is_empty()); + + m.insert(0, 0); + m.remove(&0); + assert!(m.is_empty()); + let initial_raw_cap = m.raw_capacity(); + m.reserve(initial_raw_cap); + let raw_cap = m.raw_capacity(); + + assert_eq!(raw_cap, initial_raw_cap * 2); + + let mut i = 0; + for _ in 0..raw_cap * 3 / 4 { + m.insert(i, i); + i += 1; + } + // three quarters full + + assert_eq!(m.len(), i); + assert_eq!(m.raw_capacity(), raw_cap); + + for _ in 0..raw_cap / 4 { + m.insert(i, i); + i += 1; + } + // half full + + let new_raw_cap = m.raw_capacity(); + assert_eq!(new_raw_cap, raw_cap * 2); + + for _ in 0..raw_cap / 2 - 1 { + i -= 1; + m.remove(&i); + assert_eq!(m.raw_capacity(), new_raw_cap); + } + // A little more than one quarter full. + m.shrink_to_fit(); + assert_eq!(m.raw_capacity(), raw_cap); + // again, a little more than half full + for _ in 0..raw_cap / 2 { + i -= 1; + m.remove(&i); + } + m.shrink_to_fit(); + + assert_eq!(m.len(), i); + assert!(!m.is_empty()); + assert_eq!(m.raw_capacity(), initial_raw_cap); + } + + #[test] + fn test_reserve_shrink_to_fit() { + let mut m = HashMap::new(); + m.insert(0, 0); + m.remove(&0); + assert!(m.capacity() >= m.len()); + for i in 0..128 { + m.insert(i, i); + } + m.reserve(256); + + let usable_cap = m.capacity(); + for i in 128..(128 + 256) { + m.insert(i, i); + assert_eq!(m.capacity(), usable_cap); + } + + for i in 100..(128 + 256) { + assert_eq!(m.remove(&i), Some(i)); + } + m.shrink_to_fit(); + + assert_eq!(m.len(), 100); + assert!(!m.is_empty()); + assert!(m.capacity() >= m.len()); + + for i in 0..100 { + assert_eq!(m.remove(&i), Some(i)); + } + m.shrink_to_fit(); + m.insert(0, 0); + + assert_eq!(m.len(), 1); + assert!(m.capacity() >= m.len()); + assert_eq!(m.remove(&0), Some(0)); + } + + #[test] + fn test_from_iter() { + let xs = [(1, 1), (2, 2), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().copied().collect(); + + for &(k, v) in &xs { + assert_eq!(map.get(&k), Some(&v)); + } + + assert_eq!(map.iter().len(), xs.len() - 1); + } + + #[test] + fn test_size_hint() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().copied().collect(); + + let mut iter = map.iter(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.size_hint(), (3, Some(3))); + } + + #[test] + fn test_iter_len() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().copied().collect(); + + let mut iter = map.iter(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.len(), 3); + } + + #[test] + fn 
test_mut_size_hint() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let mut map: HashMap<_, _> = xs.iter().copied().collect(); + + let mut iter = map.iter_mut(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.size_hint(), (3, Some(3))); + } + + #[test] + fn test_iter_mut_len() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let mut map: HashMap<_, _> = xs.iter().copied().collect(); + + let mut iter = map.iter_mut(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.len(), 3); + } + + #[test] + fn test_index() { + let mut map = HashMap::new(); + + map.insert(1, 2); + map.insert(2, 1); + map.insert(3, 4); + + assert_eq!(map[&2], 1); + } + + #[test] + #[should_panic] + fn test_index_nonexistent() { + let mut map = HashMap::new(); + + map.insert(1, 2); + map.insert(2, 1); + map.insert(3, 4); + + #[allow(clippy::no_effect)] // false positive lint + map[&4]; + } + + #[test] + fn test_entry() { + let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; + + let mut map: HashMap<_, _> = xs.iter().copied().collect(); + + // Existing key (insert) + match map.entry(1) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + assert_eq!(view.get(), &10); + assert_eq!(view.insert(100), 10); + } + } + assert_eq!(map.get(&1).unwrap(), &100); + assert_eq!(map.len(), 6); + + // Existing key (update) + match map.entry(2) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + let v = view.get_mut(); + let new_v = (*v) * 10; + *v = new_v; + } + } + assert_eq!(map.get(&2).unwrap(), &200); + assert_eq!(map.len(), 6); + + // Existing key (take) + match map.entry(3) { + Vacant(_) => unreachable!(), + Occupied(view) => { + assert_eq!(view.remove(), 30); + } + } + assert_eq!(map.get(&3), None); + assert_eq!(map.len(), 5); + + // Inexistent key (insert) + match map.entry(10) { + Occupied(_) => unreachable!(), + Vacant(view) => { + assert_eq!(*view.insert(1000), 1000); + } + } + assert_eq!(map.get(&10).unwrap(), &1000); + assert_eq!(map.len(), 6); + } + + #[test] + fn test_entry_ref() { + let xs = [ + ("One".to_owned(), 10), + ("Two".to_owned(), 20), + ("Three".to_owned(), 30), + ("Four".to_owned(), 40), + ("Five".to_owned(), 50), + ("Six".to_owned(), 60), + ]; + + let mut map: HashMap<_, _> = xs.iter().cloned().collect(); + + // Existing key (insert) + match map.entry_ref("One") { + EntryRef::Vacant(_) => unreachable!(), + EntryRef::Occupied(mut view) => { + assert_eq!(view.get(), &10); + assert_eq!(view.insert(100), 10); + } + } + assert_eq!(map.get("One").unwrap(), &100); + assert_eq!(map.len(), 6); + + // Existing key (update) + match map.entry_ref("Two") { + EntryRef::Vacant(_) => unreachable!(), + EntryRef::Occupied(mut view) => { + let v = view.get_mut(); + let new_v = (*v) * 10; + *v = new_v; + } + } + assert_eq!(map.get("Two").unwrap(), &200); + assert_eq!(map.len(), 6); + + // Existing key (take) + match map.entry_ref("Three") { + EntryRef::Vacant(_) => unreachable!(), + EntryRef::Occupied(view) => { + assert_eq!(view.remove(), 30); + } + } + assert_eq!(map.get("Three"), None); + assert_eq!(map.len(), 5); + + // Inexistent key (insert) + match map.entry_ref("Ten") { + EntryRef::Occupied(_) => unreachable!(), + EntryRef::Vacant(view) => { + assert_eq!(*view.insert(1000), 1000); + } + } + assert_eq!(map.get("Ten").unwrap(), &1000); + assert_eq!(map.len(), 6); + } + + #[test] + fn test_entry_take_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn check(m: &HashMap) { + for k in m.keys() { + 
assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = u64::from_le_bytes(*b"testseed"); + SmallRng::seed_from_u64(seed) + }; + + // Populate the map with some items. + for _ in 0..50 { + let x = rng.gen_range(-10..10); + m.insert(x, ()); + } + + for _ in 0..1000 { + let x = rng.gen_range(-10..10); + match m.entry(x) { + Vacant(_) => {} + Occupied(e) => { + e.remove(); + } + } + + check(&m); + } + } + + #[test] + fn test_entry_ref_take_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn check(m: &HashMap) { + for k in m.keys() { + assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = u64::from_le_bytes(*b"testseed"); + SmallRng::seed_from_u64(seed) + }; + + // Populate the map with some items. + for _ in 0..50 { + let mut x = std::string::String::with_capacity(1); + x.push(rng.gen_range('a'..='z')); + m.insert(x, ()); + } + + for _ in 0..1000 { + let mut x = std::string::String::with_capacity(1); + x.push(rng.gen_range('a'..='z')); + match m.entry_ref(x.as_str()) { + EntryRef::Vacant(_) => {} + EntryRef::Occupied(e) => { + e.remove(); + } + } + + check(&m); + } + } + + #[test] + fn test_extend_ref_k_ref_v() { + let mut a = HashMap::new(); + a.insert(1, "one"); + let mut b = HashMap::new(); + b.insert(2, "two"); + b.insert(3, "three"); + + a.extend(&b); + + assert_eq!(a.len(), 3); + assert_eq!(a[&1], "one"); + assert_eq!(a[&2], "two"); + assert_eq!(a[&3], "three"); + } + + #[test] + #[allow(clippy::needless_borrow)] + fn test_extend_ref_kv_tuple() { + use std::ops::AddAssign; + let mut a = HashMap::new(); + a.insert(0, 0); + + fn create_arr + Copy, const N: usize>(start: T, step: T) -> [(T, T); N] { + let mut outs: [(T, T); N] = [(start, start); N]; + let mut element = step; + outs.iter_mut().skip(1).for_each(|(k, v)| { + *k += element; + *v += element; + element += step; + }); + outs + } + + let for_iter: Vec<_> = (0..100).map(|i| (i, i)).collect(); + let iter = for_iter.iter(); + let vec: Vec<_> = (100..200).map(|i| (i, i)).collect(); + a.extend(iter); + a.extend(&vec); + a.extend(create_arr::(200, 1)); + + assert_eq!(a.len(), 300); + + for item in 0..300 { + assert_eq!(a[&item], item); + } + } + + #[test] + fn test_capacity_not_less_than_len() { + let mut a = HashMap::new(); + let mut item = 0; + + for _ in 0..116 { + a.insert(item, 0); + item += 1; + } + + assert!(a.capacity() > a.len()); + + let free = a.capacity() - a.len(); + for _ in 0..free { + a.insert(item, 0); + item += 1; + } + + assert_eq!(a.len(), a.capacity()); + + // Insert at capacity should cause allocation. 
+ a.insert(item, 0); + assert!(a.capacity() > a.len()); + } + + #[test] + fn test_occupied_entry_key() { + let mut a = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + assert!(a.is_empty()); + a.insert(key, value); + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + + match a.entry(key) { + Vacant(_) => panic!(), + Occupied(e) => assert_eq!(key, *e.key()), + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_occupied_entry_ref_key() { + let mut a = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + assert!(a.is_empty()); + a.insert(key.to_owned(), value); + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + + match a.entry_ref(key) { + EntryRef::Vacant(_) => panic!(), + EntryRef::Occupied(e) => assert_eq!(key, e.key()), + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_vacant_entry_key() { + let mut a = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + + assert!(a.is_empty()); + match a.entry(key) { + Occupied(_) => panic!(), + Vacant(e) => { + assert_eq!(key, *e.key()); + e.insert(value); + } + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_vacant_entry_ref_key() { + let mut a: HashMap = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + + assert!(a.is_empty()); + match a.entry_ref(key) { + EntryRef::Occupied(_) => panic!(), + EntryRef::Vacant(e) => { + assert_eq!(key, e.key()); + e.insert(value); + } + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_occupied_entry_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a.entry(key).insert(value).replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = match a.entry(key) { + Occupied(e) => e.replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }), + Vacant(_) => panic!(), + }; + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_entry_and_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a.entry(key).and_replace_entry_with(|_, _| panic!()); + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + Occupied(_) => panic!(), + } + + a.insert(key, value); + + let entry = a.entry(key).and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = a.entry(key).and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }); + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_replace_entry_with_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn 
check(m: &HashMap) { + for k in m.keys() { + assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = u64::from_le_bytes(*b"testseed"); + SmallRng::seed_from_u64(seed) + }; + + // Populate the map with some items. + for _ in 0..50 { + let x = rng.gen_range(-10..10); + m.insert(x, ()); + } + + for _ in 0..1000 { + let x = rng.gen_range(-10..10); + m.entry(x).and_replace_entry_with(|_, _| None); + check(&m); + } + } + + #[test] + fn test_retain() { + let mut map: HashMap = (0..100).map(|x| (x, x * 10)).collect(); + + map.retain(|&k, _| k % 2 == 0); + assert_eq!(map.len(), 50); + assert_eq!(map[&2], 20); + assert_eq!(map[&4], 40); + assert_eq!(map[&6], 60); + } + + #[test] + fn test_extract_if() { + { + let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); + let drained = map.extract_if(|&k, _| k % 2 == 0); + let mut out = drained.collect::>(); + out.sort_unstable(); + assert_eq!(vec![(0, 0), (2, 20), (4, 40), (6, 60)], out); + assert_eq!(map.len(), 4); + } + { + let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); + map.extract_if(|&k, _| k % 2 == 0).for_each(drop); + assert_eq!(map.len(), 4); + } + } + + #[test] + #[cfg_attr(miri, ignore)] // FIXME: no OOM signalling (https://github.com/rust-lang/miri/issues/613) + fn test_try_reserve() { + use crate::TryReserveError::{AllocError, CapacityOverflow}; + + const MAX_ISIZE: usize = isize::MAX as usize; + + let mut empty_bytes: HashMap = HashMap::new(); + + if let Err(CapacityOverflow) = empty_bytes.try_reserve(usize::MAX) { + } else { + panic!("usize::MAX should trigger an overflow!"); + } + + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_ISIZE) { + } else { + panic!("isize::MAX should trigger an overflow!"); + } + + if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_ISIZE / 5) { + } else { + // This may succeed if there is enough free memory. Attempt to + // allocate a few more hashmaps to ensure the allocation will fail. + let mut empty_bytes2: HashMap = HashMap::new(); + let _ = empty_bytes2.try_reserve(MAX_ISIZE / 5); + let mut empty_bytes3: HashMap = HashMap::new(); + let _ = empty_bytes3.try_reserve(MAX_ISIZE / 5); + let mut empty_bytes4: HashMap = HashMap::new(); + if let Err(AllocError { .. 
}) = empty_bytes4.try_reserve(MAX_ISIZE / 5) { + } else { + panic!("isize::MAX / 5 should trigger an OOM!"); + } + } + } + + #[test] + fn test_const_with_hasher() { + use core::hash::BuildHasher; + use std::collections::hash_map::DefaultHasher; + + #[derive(Clone)] + struct MyHasher; + impl BuildHasher for MyHasher { + type Hasher = DefaultHasher; + + fn build_hasher(&self) -> DefaultHasher { + DefaultHasher::new() + } + } + + const EMPTY_MAP: HashMap = + HashMap::with_hasher(MyHasher); + + let mut map = EMPTY_MAP; + map.insert(17, "seventeen".to_owned()); + assert_eq!("seventeen", map[&17]); + } + + #[test] + fn test_get_many_mut() { + let mut map = HashMap::new(); + map.insert("foo".to_owned(), 0); + map.insert("bar".to_owned(), 10); + map.insert("baz".to_owned(), 20); + map.insert("qux".to_owned(), 30); + + let xs = map.get_many_mut(["foo", "qux"]); + assert_eq!(xs, [Some(&mut 0), Some(&mut 30)]); + + let xs = map.get_many_mut(["foo", "dud"]); + assert_eq!(xs, [Some(&mut 0), None]); + + let ys = map.get_many_key_value_mut(["bar", "baz"]); + assert_eq!( + ys, + [ + Some((&"bar".to_owned(), &mut 10)), + Some((&"baz".to_owned(), &mut 20)) + ], + ); + + let ys = map.get_many_key_value_mut(["bar", "dip"]); + assert_eq!(ys, [Some((&"bar".to_string(), &mut 10)), None]); + } + + #[test] + #[should_panic = "duplicate keys found"] + fn test_get_many_mut_duplicate() { + let mut map = HashMap::new(); + map.insert("foo".to_owned(), 0); + + let _xs = map.get_many_mut(["foo", "foo"]); + } + + #[test] + #[should_panic = "panic in drop"] + fn test_clone_from_double_drop() { + #[derive(Clone)] + struct CheckedDrop { + panic_in_drop: bool, + dropped: bool, + } + impl Drop for CheckedDrop { + fn drop(&mut self) { + if self.panic_in_drop { + self.dropped = true; + panic!("panic in drop"); + } + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + const DISARMED: CheckedDrop = CheckedDrop { + panic_in_drop: false, + dropped: false, + }; + const ARMED: CheckedDrop = CheckedDrop { + panic_in_drop: true, + dropped: false, + }; + + let mut map1 = HashMap::new(); + map1.insert(1, DISARMED); + map1.insert(2, DISARMED); + map1.insert(3, DISARMED); + map1.insert(4, DISARMED); + + let mut map2 = HashMap::new(); + map2.insert(1, DISARMED); + map2.insert(2, ARMED); + map2.insert(3, DISARMED); + map2.insert(4, DISARMED); + + map2.clone_from(&map1); + } + + #[test] + #[should_panic = "panic in clone"] + fn test_clone_from_memory_leaks() { + use alloc::vec::Vec; + + struct CheckedClone { + panic_in_clone: bool, + need_drop: Vec, + } + impl Clone for CheckedClone { + fn clone(&self) -> Self { + if self.panic_in_clone { + panic!("panic in clone") + } + Self { + panic_in_clone: self.panic_in_clone, + need_drop: self.need_drop.clone(), + } + } + } + let mut map1 = HashMap::new(); + map1.insert( + 1, + CheckedClone { + panic_in_clone: false, + need_drop: vec![0, 1, 2], + }, + ); + map1.insert( + 2, + CheckedClone { + panic_in_clone: false, + need_drop: vec![3, 4, 5], + }, + ); + map1.insert( + 3, + CheckedClone { + panic_in_clone: true, + need_drop: vec![6, 7, 8], + }, + ); + let _map2 = map1.clone(); + } + + struct MyAllocInner { + drop_count: Arc, + } + + #[derive(Clone)] + struct MyAlloc { + _inner: Arc, + } + + impl MyAlloc { + fn new(drop_count: Arc) -> Self { + MyAlloc { + _inner: Arc::new(MyAllocInner { drop_count }), + } + } + } + + impl Drop for MyAllocInner { + fn drop(&mut self) { + println!("MyAlloc freed."); + self.drop_count.fetch_sub(1, Ordering::SeqCst); + } + } + + unsafe impl 
Allocator for MyAlloc { + fn allocate(&self, layout: Layout) -> std::result::Result, AllocError> { + let g = Global; + g.allocate(layout) + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + let g = Global; + g.deallocate(ptr, layout) + } + } + + #[test] + fn test_hashmap_into_iter_bug() { + let dropped: Arc = Arc::new(AtomicI8::new(1)); + + { + let mut map = HashMap::with_capacity_in(10, MyAlloc::new(dropped.clone())); + for i in 0..10 { + map.entry(i).or_insert_with(|| "i".to_string()); + } + + for (k, v) in map { + println!("{}, {}", k, v); + } + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 0); + } + + #[derive(Debug)] + struct CheckedCloneDrop { + panic_in_clone: bool, + panic_in_drop: bool, + dropped: bool, + data: T, + } + + impl CheckedCloneDrop { + fn new(panic_in_clone: bool, panic_in_drop: bool, data: T) -> Self { + CheckedCloneDrop { + panic_in_clone, + panic_in_drop, + dropped: false, + data, + } + } + } + + impl Clone for CheckedCloneDrop { + fn clone(&self) -> Self { + if self.panic_in_clone { + panic!("panic in clone") + } + Self { + panic_in_clone: self.panic_in_clone, + panic_in_drop: self.panic_in_drop, + dropped: self.dropped, + data: self.data.clone(), + } + } + } + + impl Drop for CheckedCloneDrop { + fn drop(&mut self) { + if self.panic_in_drop { + self.dropped = true; + panic!("panic in drop"); + } + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + + /// Return hashmap with predefined distribution of elements. + /// All elements will be located in the same order as elements + /// returned by iterator. + /// + /// This function does not panic, but returns an error as a `String` + /// to distinguish between a test panic and an error in the input data. + fn get_test_map( + iter: I, + mut fun: impl FnMut(u64) -> T, + alloc: A, + ) -> Result, DefaultHashBuilder, A>, String> + where + I: Iterator + Clone + ExactSizeIterator, + A: Allocator, + T: PartialEq + core::fmt::Debug, + { + use crate::scopeguard::guard; + + let mut map: HashMap, _, A> = + HashMap::with_capacity_in(iter.size_hint().0, alloc); + { + let mut guard = guard(&mut map, |map| { + for (_, value) in map.iter_mut() { + value.panic_in_drop = false + } + }); + + let mut count = 0; + // Hash and Key must be equal to each other for controlling the elements placement. 
+ for (panic_in_clone, panic_in_drop) in iter.clone() { + if core::mem::needs_drop::() && panic_in_drop { + return Err(String::from( + "panic_in_drop can be set with a type that doesn't need to be dropped", + )); + } + guard.table.insert( + count, + ( + count, + CheckedCloneDrop::new(panic_in_clone, panic_in_drop, fun(count)), + ), + |(k, _)| *k, + ); + count += 1; + } + + // Let's check that all elements are located as we wanted + let mut check_count = 0; + for ((key, value), (panic_in_clone, panic_in_drop)) in guard.iter().zip(iter) { + if *key != check_count { + return Err(format!( + "key != check_count,\nkey: `{}`,\ncheck_count: `{}`", + key, check_count + )); + } + if value.dropped + || value.panic_in_clone != panic_in_clone + || value.panic_in_drop != panic_in_drop + || value.data != fun(check_count) + { + return Err(format!( + "Value is not equal to expected,\nvalue: `{:?}`,\nexpected: \ + `CheckedCloneDrop {{ panic_in_clone: {}, panic_in_drop: {}, dropped: {}, data: {:?} }}`", + value, panic_in_clone, panic_in_drop, false, fun(check_count) + )); + } + check_count += 1; + } + + if guard.len() != check_count as usize { + return Err(format!( + "map.len() != check_count,\nmap.len(): `{}`,\ncheck_count: `{}`", + guard.len(), + check_count + )); + } + + if count != check_count { + return Err(format!( + "count != check_count,\ncount: `{}`,\ncheck_count: `{}`", + count, check_count + )); + } + core::mem::forget(guard); + } + Ok(map) + } + + const DISARMED: bool = false; + const ARMED: bool = true; + + const ARMED_FLAGS: [bool; 8] = [ + DISARMED, DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + + const DISARMED_FLAGS: [bool; 8] = [ + DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + + #[test] + #[should_panic = "panic in clone"] + fn test_clone_memory_leaks_and_double_drop_one() { + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let map: HashMap>, DefaultHashBuilder, MyAlloc> = + match get_test_map( + ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + // Clone should normally clone a few elements, and then (when the + // clone function panics), deallocate both its own memory, memory + // of `dropped: Arc` and the memory of already cloned + // elements (Vec memory inside CheckedCloneDrop). + let _map2 = map.clone(); + } + } + + #[test] + #[should_panic = "panic in drop"] + fn test_clone_memory_leaks_and_double_drop_two() { + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let map: HashMap, DefaultHashBuilder, _> = match get_test_map( + DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| n, + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + let mut map2 = match get_test_map( + DISARMED_FLAGS.into_iter().zip(ARMED_FLAGS), + |n| n, + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + // The `clone_from` should try to drop the elements of `map2` without + // double drop and leaking the allocator. Elements that have not been + // dropped leak their memory. + map2.clone_from(&map); + } + } + + /// We check that we have a working table if the clone operation from another + /// thread ended in a panic (when buckets of maps are equal to each other). 
+ #[test] + fn test_catch_panic_clone_from_when_len_is_equal() { + use std::thread; + + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let mut map = match get_test_map( + DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + thread::scope(|s| { + let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| { + let scope_map = + match get_test_map(ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), |n| vec![n * 2], MyAlloc::new(dropped.clone())) { + Ok(map) => map, + Err(msg) => return msg, + }; + if map.table.buckets() != scope_map.table.buckets() { + return format!( + "map.table.buckets() != scope_map.table.buckets(),\nleft: `{}`,\nright: `{}`", + map.table.buckets(), scope_map.table.buckets() + ); + } + map.clone_from(&scope_map); + "We must fail the cloning!!!".to_owned() + }); + if let Ok(msg) = result.join() { + panic!("{msg}") + } + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. + assert_eq!(map.len(), 0); + assert_eq!(map.iter().count(), 0); + assert_eq!(unsafe { map.table.iter().count() }, 0); + assert_eq!(unsafe { map.table.iter().iter.count() }, 0); + + for idx in 0..map.table.buckets() { + let idx = idx as u64; + assert!( + map.table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 0); + } + + /// We check that we have a working table if the clone operation from another + /// thread ended in a panic (when buckets of maps are not equal to each other). + #[test] + fn test_catch_panic_clone_from_when_len_is_not_equal() { + use std::thread; + + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let mut map = match get_test_map( + [DISARMED].into_iter().zip([DISARMED]), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + thread::scope(|s| { + let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| { + let scope_map = match get_test_map( + ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n * 2], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => return msg, + }; + if map.table.buckets() == scope_map.table.buckets() { + return format!( + "map.table.buckets() == scope_map.table.buckets(): `{}`", + map.table.buckets() + ); + } + map.clone_from(&scope_map); + "We must fail the cloning!!!".to_owned() + }); + if let Ok(msg) = result.join() { + panic!("{msg}") + } + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. 
+ assert_eq!(map.len(), 0); + assert_eq!(map.iter().count(), 0); + assert_eq!(unsafe { map.table.iter().count() }, 0); + assert_eq!(unsafe { map.table.iter().iter.count() }, 0); + + for idx in 0..map.table.buckets() { + let idx = idx as u64; + assert!( + map.table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 0); + } + + #[test] + fn test_allocation_info() { + assert_eq!(HashMap::<(), ()>::new().allocation_size(), 0); + assert_eq!(HashMap::::new().allocation_size(), 0); + assert!( + HashMap::::with_capacity(1).allocation_size() > core::mem::size_of::() + ); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/alloc.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/alloc.rs new file mode 100644 index 000000000000..15299e7b0994 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/alloc.rs @@ -0,0 +1,86 @@ +pub(crate) use self::inner::{do_alloc, Allocator, Global}; + +// Nightly-case. +// Use unstable `allocator_api` feature. +// This is compatible with `allocator-api2` which can be enabled or not. +// This is used when building for `std`. +#[cfg(feature = "nightly")] +mod inner { + use crate::alloc::alloc::Layout; + pub use crate::alloc::alloc::{Allocator, Global}; + use core::ptr::NonNull; + + #[allow(clippy::map_err_ignore)] + pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + match alloc.allocate(layout) { + Ok(ptr) => Ok(ptr.as_non_null_ptr()), + Err(_) => Err(()), + } + } +} + +// Basic non-nightly case. +// This uses `allocator-api2` enabled by default. +// If any crate enables "nightly" in `allocator-api2`, +// this will be equivalent to the nightly case, +// since `allocator_api2::alloc::Allocator` would be re-export of +// `core::alloc::Allocator`. +#[cfg(all(not(feature = "nightly"), feature = "allocator-api2"))] +mod inner { + use crate::alloc::alloc::Layout; + pub use allocator_api2::alloc::{Allocator, Global}; + use core::ptr::NonNull; + + #[allow(clippy::map_err_ignore)] + pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + match alloc.allocate(layout) { + Ok(ptr) => Ok(ptr.cast()), + Err(_) => Err(()), + } + } +} + +// No-defaults case. +// When building with default-features turned off and +// neither `nightly` nor `allocator-api2` is enabled, +// this will be used. +// Making it impossible to use any custom allocator with collections defined +// in this crate. +// Any crate in build-tree can enable `allocator-api2`, +// or `nightly` without disturbing users that don't want to use it. 
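Whichever `inner` module below ends up compiled in, every table allocation in this crate is funneled through `do_alloc` over the selected `Allocator` trait. As a minimal sketch (not part of the vendored file) of how that seam is consumed on the default `allocator-api2` path, the snippet below mirrors the `MyAlloc` test earlier in this patch; `LoggingAlloc` and its delegation to `Global` are invented for illustration and assume hashbrown's default features.

use allocator_api2::alloc::{AllocError, Allocator, Global};
use core::alloc::Layout;
use core::ptr::NonNull;
use hashbrown::HashMap;

// Illustrative allocator: logs each request, then delegates to the global allocator.
struct LoggingAlloc;

unsafe impl Allocator for LoggingAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Every table (re)allocation made by the map arrives here via `do_alloc`.
        println!("allocate {} bytes", layout.size());
        Global.allocate(layout)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        println!("deallocate {} bytes", layout.size());
        unsafe { Global.deallocate(ptr, layout) };
    }
}

fn main() {
    // `with_capacity_in` pairs the map with the custom allocator, the same way
    // the `test_hashmap_into_iter_bug` test above uses `MyAlloc`.
    let mut map = HashMap::with_capacity_in(4, LoggingAlloc);
    map.insert(1u32, "one");
    assert_eq!(map[&1], "one");
}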
+#[cfg(not(any(feature = "nightly", feature = "allocator-api2")))]
+mod inner {
+    use crate::alloc::alloc::{alloc, dealloc, Layout};
+    use core::ptr::NonNull;
+
+    #[allow(clippy::missing_safety_doc)] // not exposed outside of this crate
+    pub unsafe trait Allocator {
+        fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()>;
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
+    }
+
+    #[derive(Copy, Clone)]
+    pub struct Global;
+
+    unsafe impl Allocator for Global {
+        #[inline]
+        fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
+            unsafe { NonNull::new(alloc(layout)).ok_or(()) }
+        }
+        #[inline]
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            dealloc(ptr.as_ptr(), layout);
+        }
+    }
+
+    impl Default for Global {
+        #[inline]
+        fn default() -> Self {
+            Global
+        }
+    }
+
+    pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+        alloc.allocate(layout)
+    }
+}
diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/bitmask.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/bitmask.rs
new file mode 100644
index 000000000000..87a5a6462aab
--- /dev/null
+++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/bitmask.rs
@@ -0,0 +1,117 @@
+use super::imp::{
+    BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE,
+};
+
+/// A bit mask which contains the result of a `Match` operation on a `Group` and
+/// allows iterating through them.
+///
+/// The bit mask is arranged so that low-order bits represent lower memory
+/// addresses for group match results.
+///
+/// For implementation reasons, the bits in the set may be sparsely packed with
+/// groups of 8 bits representing one element. If any of these bits are non-zero
+/// then this element is considered to true in the mask. If this is the
+/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
+/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
+/// similarly a mask of all the actually-used bits.
+///
+/// To iterate over a bit mask, it must be converted to a form where only 1 bit
+/// is set per element. This is done by applying `BITMASK_ITER_MASK` on the
+/// mask bits.
+#[derive(Copy, Clone)]
+pub(crate) struct BitMask(pub(crate) BitMaskWord);
+
+#[allow(clippy::use_self)]
+impl BitMask {
+    /// Returns a new `BitMask` with all bits inverted.
+    #[inline]
+    #[must_use]
+    #[allow(dead_code)]
+    pub(crate) fn invert(self) -> Self {
+        BitMask(self.0 ^ BITMASK_MASK)
+    }
+
+    /// Returns a new `BitMask` with the lowest bit removed.
+    #[inline]
+    #[must_use]
+    fn remove_lowest_bit(self) -> Self {
+        BitMask(self.0 & (self.0 - 1))
+    }
+
+    /// Returns whether the `BitMask` has at least one set bit.
+    #[inline]
+    pub(crate) fn any_bit_set(self) -> bool {
+        self.0 != 0
+    }
+
+    /// Returns the first set bit in the `BitMask`, if there is one.
+    #[inline]
+    pub(crate) fn lowest_set_bit(self) -> Option<usize> {
+        if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) {
+            Some(Self::nonzero_trailing_zeros(nonzero))
+        } else {
+            None
+        }
+    }
+
+    /// Returns the number of trailing zeroes in the `BitMask`.
+    #[inline]
+    pub(crate) fn trailing_zeros(self) -> usize {
+        // ARM doesn't have a trailing_zeroes instruction, and instead uses
+        // reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM
+        // versions (pre-ARMv7) don't have RBIT and need to emulate it
+        // instead. Since we only have 1 bit set in each byte on ARM, we can
+        // use swap_bytes (REV) + leading_zeroes instead.
+ if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { + self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE + } else { + self.0.trailing_zeros() as usize / BITMASK_STRIDE + } + } + + /// Same as above but takes a `NonZeroBitMaskWord`. + #[inline] + fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize { + if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { + // SAFETY: A byte-swapped non-zero value is still non-zero. + let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) }; + swapped.leading_zeros() as usize / BITMASK_STRIDE + } else { + nonzero.trailing_zeros() as usize / BITMASK_STRIDE + } + } + + /// Returns the number of leading zeroes in the `BitMask`. + #[inline] + pub(crate) fn leading_zeros(self) -> usize { + self.0.leading_zeros() as usize / BITMASK_STRIDE + } +} + +impl IntoIterator for BitMask { + type Item = usize; + type IntoIter = BitMaskIter; + + #[inline] + fn into_iter(self) -> BitMaskIter { + // A BitMask only requires each element (group of bits) to be non-zero. + // However for iteration we need each element to only contain 1 bit. + BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK)) + } +} + +/// Iterator over the contents of a `BitMask`, returning the indices of set +/// bits. +#[derive(Copy, Clone)] +pub(crate) struct BitMaskIter(pub(crate) BitMask); + +impl Iterator for BitMaskIter { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + let bit = self.0.lowest_set_bit()?; + self.0 = self.0.remove_lowest_bit(); + Some(bit) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/generic.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/generic.rs new file mode 100644 index 000000000000..435164479927 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/generic.rs @@ -0,0 +1,157 @@ +use super::bitmask::BitMask; +use super::Tag; +use core::{mem, ptr}; + +// Use the native word size as the group size. Using a 64-bit group size on +// a 32-bit architecture will just end up being more expensive because +// shifts and multiplies will need to be emulated. + +cfg_if! { + if #[cfg(any( + target_pointer_width = "64", + target_arch = "aarch64", + target_arch = "x86_64", + target_arch = "wasm32", + ))] { + type GroupWord = u64; + type NonZeroGroupWord = core::num::NonZeroU64; + } else { + type GroupWord = u32; + type NonZeroGroupWord = core::num::NonZeroU32; + } +} + +pub(crate) type BitMaskWord = GroupWord; +pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord; +pub(crate) const BITMASK_STRIDE: usize = 8; +// We only care about the highest bit of each tag for the mask. +#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)] +pub(crate) const BITMASK_MASK: BitMaskWord = u64::from_ne_bytes([Tag::DELETED.0; 8]) as GroupWord; +pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; + +/// Helper function to replicate a tag across a `GroupWord`. +#[inline] +fn repeat(tag: Tag) -> GroupWord { + GroupWord::from_ne_bytes([tag.0; Group::WIDTH]) +} + +/// Abstraction over a group of control tags which can be scanned in +/// parallel. +/// +/// This implementation uses a word-sized integer. +#[derive(Copy, Clone)] +pub(crate) struct Group(GroupWord); + +// We perform all operations in the native endianness, and convert to +// little-endian just before creating a BitMask. The can potentially +// enable the compiler to eliminate unnecessary byte swaps if we are +// only checking whether a BitMask is empty. 
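// Illustrative sketch (not part of the vendored hashbrown sources): the
// `Group`/`BitMask` pair above scans one machine word of control tags in
// parallel. Assuming the portable layout shown here (one tag per byte,
// BITMASK_STRIDE == 8), the same word-wide match can be written against a
// plain `u64`. The helper name `match_byte_positions` is hypothetical.
fn match_byte_positions(group: u64, tag: u8) -> Vec<usize> {
    // Replicate the tag across the word, as `repeat()` does above.
    let repeated = u64::from_ne_bytes([tag; 8]);
    let cmp = group ^ repeated;
    // Word-wide "byte is zero" test; as the `match_tag` comment below notes,
    // this can yield rare false positives that a later key comparison filters.
    let ones = u64::from_ne_bytes([0x01; 8]);
    let highs = u64::from_ne_bytes([0x80; 8]);
    let mut mask = (cmp.wrapping_sub(ones) & !cmp & highs).to_le();
    let mut hits = Vec::new();
    while mask != 0 {
        // One element per 8 bits, hence the divide-by-BITMASK_STRIDE.
        hits.push(mask.trailing_zeros() as usize / 8);
        // Clear the lowest set bit, like `BitMask::remove_lowest_bit`.
        mask &= mask - 1;
    }
    hits
}

fn main() {
    // Tag 0x11 sits at byte indices 0, 2, 5 and 7 of this group.
    let group = u64::from_ne_bytes([0x11, 0x7f, 0x11, 0xff, 0x80, 0x11, 0x05, 0x11]);
    assert_eq!(match_byte_positions(group, 0x11), vec![0, 2, 5, 7]);
}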
+#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub(crate) const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty tags, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + #[inline] + pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] { + #[repr(C)] + struct AlignedTags { + _align: [Group; 0], + tags: [Tag; Group::WIDTH], + } + const ALIGNED_TAGS: AlignedTags = AlignedTags { + _align: [], + tags: [Tag::EMPTY; Group::WIDTH], + }; + &ALIGNED_TAGS.tags + } + + /// Loads a group of tags starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub(crate) unsafe fn load(ptr: *const Tag) -> Self { + Group(ptr::read_unaligned(ptr.cast())) + } + + /// Loads a group of tags starting at the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(ptr::read(ptr.cast())) + } + + /// Stores the group of tags to the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + ptr::write(ptr.cast(), self.0); + } + + /// Returns a `BitMask` indicating all tags in the group which *may* + /// have the given value. + /// + /// This function may return a false positive in certain cases where + /// the tag in the group differs from the searched value only in its + /// lowest bit. This is fine because: + /// - This never happens for `EMPTY` and `DELETED`, only full entries. + /// - The check for key equality will catch these. + /// - This only happens if there is at least 1 true match. + /// - The chance of this happening is very low (< 1% chance per byte). + #[inline] + pub(crate) fn match_tag(self, tag: Tag) -> BitMask { + // This algorithm is derived from + // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + let cmp = self.0 ^ repeat(tag); + BitMask((cmp.wrapping_sub(repeat(Tag(0x01))) & !cmp & repeat(Tag::DELETED)).to_le()) + } + + /// Returns a `BitMask` indicating all tags in the group which are + /// `EMPTY`. + #[inline] + pub(crate) fn match_empty(self) -> BitMask { + // If the high bit is set, then the tag must be either: + // 1111_1111 (EMPTY) or 1000_0000 (DELETED). + // So we can just check if the top two bits are 1 by ANDing them. + BitMask((self.0 & (self.0 << 1) & repeat(Tag::DELETED)).to_le()) + } + + /// Returns a `BitMask` indicating all tags in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub(crate) fn match_empty_or_deleted(self) -> BitMask { + // A tag is EMPTY or DELETED iff the high bit is set + BitMask((self.0 & repeat(Tag::DELETED)).to_le()) + } + + /// Returns a `BitMask` indicating all tags in the group which are full. 
+ #[inline] + pub(crate) fn match_full(self) -> BitMask { + self.match_empty_or_deleted().invert() + } + + /// Performs the following transformation on all tags in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let full = 1000_0000 (true) or 0000_0000 (false) + // !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry) + // !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry) + let full = !self.0 & repeat(Tag::DELETED); + Group(!full + (full >> 7)) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/mod.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/mod.rs new file mode 100644 index 000000000000..1c4a5f42e741 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/mod.rs @@ -0,0 +1,4481 @@ +use crate::alloc::alloc::{handle_alloc_error, Layout}; +use crate::scopeguard::{guard, ScopeGuard}; +use crate::TryReserveError; +use core::array; +use core::iter::FusedIterator; +use core::marker::PhantomData; +use core::mem; +use core::ptr::NonNull; +use core::{hint, ptr}; + +cfg_if! { + // Use the SSE2 implementation if possible: it allows us to scan 16 buckets + // at once instead of 8. We don't bother with AVX since it would require + // runtime dispatch and wouldn't gain us much anyways: the probability of + // finding a match drops off drastically after the first few buckets. + // + // I attempted an implementation on ARM using NEON instructions, but it + // turns out that most NEON instructions have multi-cycle latency, which in + // the end outweighs any gains over the generic implementation. + if #[cfg(all( + target_feature = "sse2", + any(target_arch = "x86", target_arch = "x86_64"), + not(miri), + ))] { + mod sse2; + use sse2 as imp; + } else if #[cfg(all( + target_arch = "aarch64", + target_feature = "neon", + // NEON intrinsics are currently broken on big-endian targets. + // See https://github.com/rust-lang/stdarch/issues/1484. + target_endian = "little", + not(miri), + ))] { + mod neon; + use neon as imp; + } else { + mod generic; + use generic as imp; + } +} + +mod alloc; +pub(crate) use self::alloc::{do_alloc, Allocator, Global}; + +mod bitmask; + +use self::bitmask::BitMaskIter; +use self::imp::Group; + +// Branch prediction hint. This is currently only available on nightly but it +// consistently improves performance by 10-15%. +#[cfg(not(feature = "nightly"))] +use core::convert::{identity as likely, identity as unlikely}; +#[cfg(feature = "nightly")] +use core::intrinsics::{likely, unlikely}; + +// FIXME: use strict provenance functions once they are stable. +// Implement it with a transmute for now. +#[inline(always)] +#[allow(clippy::useless_transmute)] // clippy is wrong, cast and transmute are different here +fn invalid_mut(addr: usize) -> *mut T { + unsafe { core::mem::transmute(addr) } +} + +#[inline] +unsafe fn offset_from(to: *const T, from: *const T) -> usize { + to.offset_from(from) as usize +} + +/// Whether memory allocation errors should return an error or abort. +#[derive(Copy, Clone)] +enum Fallibility { + Fallible, + Infallible, +} + +impl Fallibility { + /// Error to return on capacity overflow. 
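// Illustrative sketch (not part of the vendored hashbrown sources): the
// rehash helper `convert_special_to_empty_and_full_to_deleted` above maps
// every control tag with nothing but bit tricks. Written per byte, using the
// `Tag` encodings defined below in raw/mod.rs (EMPTY = 0b1111_1111,
// DELETED = 0b1000_0000, FULL = top bit clear), the transformation is:
fn convert_one(tag: u8) -> u8 {
    // `full` is 0x80 for a FULL tag (top bit clear) and 0x00 for EMPTY/DELETED.
    let full = !tag & 0x80;
    // FULL:  !0x80 + (0x80 >> 7) = 0x7f + 1 = 0x80  (DELETED)
    // other: !0x00 + (0x00 >> 7) = 0xff + 0 = 0xff  (EMPTY)
    (!full).wrapping_add(full >> 7)
}

fn main() {
    assert_eq!(convert_one(0b1111_1111), 0b1111_1111); // EMPTY   -> EMPTY
    assert_eq!(convert_one(0b1000_0000), 0b1111_1111); // DELETED -> EMPTY
    assert_eq!(convert_one(0b0101_0110), 0b1000_0000); // FULL    -> DELETED
}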
+ #[cfg_attr(feature = "inline-more", inline)] + fn capacity_overflow(self) -> TryReserveError { + match self { + Fallibility::Fallible => TryReserveError::CapacityOverflow, + Fallibility::Infallible => panic!("Hash table capacity overflow"), + } + } + + /// Error to return on allocation error. + #[cfg_attr(feature = "inline-more", inline)] + fn alloc_err(self, layout: Layout) -> TryReserveError { + match self { + Fallibility::Fallible => TryReserveError::AllocError { layout }, + Fallibility::Infallible => handle_alloc_error(layout), + } + } +} + +trait SizedTypeProperties: Sized { + const IS_ZERO_SIZED: bool = mem::size_of::() == 0; + const NEEDS_DROP: bool = mem::needs_drop::(); +} + +impl SizedTypeProperties for T {} + +/// Single tag in a control group. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +#[repr(transparent)] +pub(crate) struct Tag(u8); +impl Tag { + /// Control tag value for an empty bucket. + const EMPTY: Tag = Tag(0b1111_1111); + + /// Control tag value for a deleted bucket. + const DELETED: Tag = Tag(0b1000_0000); + + /// Checks whether a control tag represents a full bucket (top bit is clear). + #[inline] + const fn is_full(self) -> bool { + self.0 & 0x80 == 0 + } + + /// Checks whether a control tag represents a special value (top bit is set). + #[inline] + const fn is_special(self) -> bool { + self.0 & 0x80 != 0 + } + + /// Checks whether a special control value is EMPTY (just check 1 bit). + #[inline] + const fn special_is_empty(self) -> bool { + debug_assert!(self.is_special()); + self.0 & 0x01 != 0 + } + + /// Creates a control tag representing a full bucket with the given hash. + #[inline] + #[allow(clippy::cast_possible_truncation)] + const fn full(hash: u64) -> Tag { + // Constant for function that grabs the top 7 bits of the hash. + const MIN_HASH_LEN: usize = if mem::size_of::() < mem::size_of::() { + mem::size_of::() + } else { + mem::size_of::() + }; + + // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit + // value, some hash functions (such as FxHash) produce a usize result + // instead, which means that the top 32 bits are 0 on 32-bit platforms. + // So we use MIN_HASH_LEN constant to handle this. + let top7 = hash >> (MIN_HASH_LEN * 8 - 7); + Tag((top7 & 0x7f) as u8) // truncation + } +} + +/// Primary hash function, used to select the initial bucket to probe from. +#[inline] +#[allow(clippy::cast_possible_truncation)] +fn h1(hash: u64) -> usize { + // On 32-bit platforms we simply ignore the higher hash bits. + hash as usize +} + +/// Probe sequence based on triangular numbers, which is guaranteed (since our +/// table size is a power of two) to visit every group of elements exactly once. +/// +/// A triangular probe has us jump by 1 more group every time. So first we +/// jump by 1 group (meaning we just continue our linear scan), then 2 groups +/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on. +/// +/// Proof that the probe will visit every group in the table: +/// +#[derive(Clone)] +struct ProbeSeq { + pos: usize, + stride: usize, +} + +impl ProbeSeq { + #[inline] + fn move_next(&mut self, bucket_mask: usize) { + // We should have found an empty bucket by now and ended the probe. + debug_assert!( + self.stride <= bucket_mask, + "Went past end of probe sequence" + ); + + self.stride += Group::WIDTH; + self.pos += self.stride; + self.pos &= bucket_mask; + } +} + +/// Returns the number of buckets needed to hold the given number of items, +/// taking the maximum load factor into account. 
+/// +/// Returns `None` if an overflow occurs. +// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258 +#[cfg_attr(target_os = "emscripten", inline(never))] +#[cfg_attr(not(target_os = "emscripten"), inline)] +fn capacity_to_buckets(cap: usize) -> Option { + debug_assert_ne!(cap, 0); + + // For small tables we require at least 1 empty bucket so that lookups are + // guaranteed to terminate if an element doesn't exist in the table. + if cap < 8 { + // We don't bother with a table size of 2 buckets since that can only + // hold a single element. Instead we skip directly to a 4 bucket table + // which can hold 3 elements. + return Some(if cap < 4 { 4 } else { 8 }); + } + + // Otherwise require 1/8 buckets to be empty (87.5% load) + // + // Be careful when modifying this, calculate_layout relies on the + // overflow check here. + let adjusted_cap = cap.checked_mul(8)? / 7; + + // Any overflows will have been caught by the checked_mul. Also, any + // rounding errors from the division above will be cleaned up by + // next_power_of_two (which can't overflow because of the previous division). + Some(adjusted_cap.next_power_of_two()) +} + +/// Returns the maximum effective capacity for the given bucket mask, taking +/// the maximum load factor into account. +#[inline] +fn bucket_mask_to_capacity(bucket_mask: usize) -> usize { + if bucket_mask < 8 { + // For tables with 1/2/4/8 buckets, we always reserve one empty slot. + // Keep in mind that the bucket mask is one less than the bucket count. + bucket_mask + } else { + // For larger tables we reserve 12.5% of the slots as empty. + ((bucket_mask + 1) / 8) * 7 + } +} + +/// Helper which allows the max calculation for `ctrl_align` to be statically computed for each `T` +/// while keeping the rest of `calculate_layout_for` independent of `T` +#[derive(Copy, Clone)] +struct TableLayout { + size: usize, + ctrl_align: usize, +} + +impl TableLayout { + #[inline] + const fn new() -> Self { + let layout = Layout::new::(); + Self { + size: layout.size(), + ctrl_align: if layout.align() > Group::WIDTH { + layout.align() + } else { + Group::WIDTH + }, + } + } + + #[inline] + fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> { + debug_assert!(buckets.is_power_of_two()); + + let TableLayout { size, ctrl_align } = self; + // Manual layout calculation since Layout methods are not yet stable. + let ctrl_offset = + size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1); + let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?; + + // We need an additional check to ensure that the allocation doesn't + // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295). + if len > isize::MAX as usize - (ctrl_align - 1) { + return None; + } + + Some(( + unsafe { Layout::from_size_align_unchecked(len, ctrl_align) }, + ctrl_offset, + )) + } +} + +/// A reference to an empty bucket into which an can be inserted. +pub struct InsertSlot { + index: usize, +} + +/// A reference to a hash table bucket containing a `T`. +/// +/// This is usually just a pointer to the element itself. However if the element +/// is a ZST, then we instead track the index of the element in the table so +/// that `erase` works properly. +pub struct Bucket { + // Actually it is pointer to next element than element itself + // this is needed to maintain pointer arithmetic invariants + // keeping direct pointer to element introduces difficulty. 
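// Illustrative sketch (not part of the vendored hashbrown sources):
// `ProbeSeq` above relies on the triangular-number property that, when the
// number of groups is a power of two, stepping by 1, then 2, then 3, ...
// groups visits every group exactly once. A quick check of that claim, plus
// the 87.5% load-factor arithmetic from `capacity_to_buckets` and
// `bucket_mask_to_capacity` worked for a requested capacity of 100:
fn main() {
    for groups in [1usize, 2, 4, 8, 16, 64, 1024] {
        let mut seen = vec![false; groups];
        let (mut pos, mut stride) = (0usize, 0usize);
        for _ in 0..groups {
            seen[pos] = true;
            // Same update as `ProbeSeq::move_next`, counted directly in
            // groups (i.e. with Group::WIDTH taken as 1).
            stride += 1;
            pos = (pos + stride) & (groups - 1);
        }
        assert!(seen.iter().all(|&v| v), "every group visited for {groups} groups");
    }

    // capacity_to_buckets(100): 100 * 8 / 7 = 114, next power of two = 128.
    assert_eq!((100usize * 8 / 7).next_power_of_two(), 128);
    // bucket_mask_to_capacity(127): 128 / 8 * 7 = 112 usable slots >= 100.
    assert_eq!(128 / 8 * 7, 112);
}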
+ // Using `NonNull` for variance and niche layout + ptr: NonNull, +} + +// This Send impl is needed for rayon support. This is safe since Bucket is +// never exposed in a public API. +unsafe impl Send for Bucket {} + +impl Clone for Bucket { + #[inline] + fn clone(&self) -> Self { + Self { ptr: self.ptr } + } +} + +impl Bucket { + /// Creates a [`Bucket`] that contain pointer to the data. + /// The pointer calculation is performed by calculating the + /// offset from given `base` pointer (convenience for + /// `base.as_ptr().sub(index)`). + /// + /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// If the `T` is a ZST, then we instead track the index of the element + /// in the table so that `erase` works properly (return + /// `NonNull::new_unchecked((index + 1) as *mut T)`) + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety + /// rules of [`NonNull::new_unchecked`] function. + /// + /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method + /// and [`NonNull::new_unchecked`] function, as well as for the correct + /// logic of the work of this crate, the following rules are necessary and + /// sufficient: + /// + /// * the `base` pointer must not be `dangling` and must points to the + /// end of the first `value element` from the `data part` of the table, i.e. + /// must be the pointer that returned by [`RawTable::data_end`] or by + /// [`RawTableInner::data_end`]; + /// + /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` + /// must be no greater than the number returned by the function + /// [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the + /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` + /// must be no greater than the number returned by the function + /// [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// [`Bucket`]: crate::raw::Bucket + /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1 + /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked + /// [`RawTable::data_end`]: crate::raw::RawTable::data_end + /// [`RawTableInner::data_end`]: RawTableInner::data_end + /// [`RawTable::buckets`]: crate::raw::RawTable::buckets + /// [`RawTableInner::buckets`]: RawTableInner::buckets + #[inline] + unsafe fn from_base_index(base: NonNull, index: usize) -> Self { + // If mem::size_of::() != 0 then return a pointer to an `element` in + // the data part of the table (we start counting from "0", so that + // in the expression T[last], the "last" index actually one less than the + // "buckets" number in the table, i.e. 
"last = RawTableInner.bucket_mask"): + // + // `from_base_index(base, 1).as_ptr()` returns a pointer that + // points here in the data part of the table + // (to the start of T1) + // | + // | `base: NonNull` must point here + // | (to the end of T0 or to the start of C0) + // v v + // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast + // ^ + // `from_base_index(base, 1)` returns a pointer + // that points here in the data part of the table + // (to the end of T1) + // + // where: T0...Tlast - our stored data; C0...Clast - control bytes + // or metadata for data. + let ptr = if T::IS_ZERO_SIZED { + // won't overflow because index must be less than length (bucket_mask) + // and bucket_mask is guaranteed to be less than `isize::MAX` + // (see TableLayout::calculate_layout_for method) + invalid_mut(index + 1) + } else { + base.as_ptr().sub(index) + }; + Self { + ptr: NonNull::new_unchecked(ptr), + } + } + + /// Calculates the index of a [`Bucket`] as distance between two pointers + /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`). + /// The returned value is in units of T: the distance in bytes divided by + /// [`core::mem::size_of::()`]. + /// + /// If the `T` is a ZST, then we return the index of the element in + /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`). + /// + /// This function is the inverse of [`from_base_index`]. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`. + /// + /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`] + /// method, as well as for the correct logic of the work of this crate, the + /// following rules are necessary and sufficient: + /// + /// * `base` contained pointer must not be `dangling` and must point to the + /// end of the first `element` from the `data part` of the table, i.e. + /// must be a pointer that returns by [`RawTable::data_end`] or by + /// [`RawTableInner::data_end`]; + /// + /// * `self` also must not contain dangling pointer; + /// + /// * both `self` and `base` must be created from the same [`RawTable`] + /// (or [`RawTableInner`]). + /// + /// If `mem::size_of::() == 0`, this function is always safe. + /// + /// [`Bucket`]: crate::raw::Bucket + /// [`from_base_index`]: crate::raw::Bucket::from_base_index + /// [`RawTable::data_end`]: crate::raw::RawTable::data_end + /// [`RawTableInner::data_end`]: RawTableInner::data_end + /// [`RawTable`]: crate::raw::RawTable + /// [`RawTableInner`]: RawTableInner + /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from + #[inline] + unsafe fn to_base_index(&self, base: NonNull) -> usize { + // If mem::size_of::() != 0 then return an index under which we used to store the + // `element` in the data part of the table (we start counting from "0", so + // that in the expression T[last], the "last" index actually is one less than the + // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"). 
+ // For example for 5th element in table calculation is performed like this: + // + // mem::size_of::() + // | + // | `self = from_base_index(base, 5)` that returns pointer + // | that points here in the data part of the table + // | (to the end of T5) + // | | `base: NonNull` must point here + // v | (to the end of T0 or to the start of C0) + // /???\ v v + // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast + // \__________ __________/ + // \/ + // `bucket.to_base_index(base)` = 5 + // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::() + // + // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data. + if T::IS_ZERO_SIZED { + // this can not be UB + self.ptr.as_ptr() as usize - 1 + } else { + offset_from(base.as_ptr(), self.ptr.as_ptr()) + } + } + + /// Acquires the underlying raw pointer `*mut T` to `data`. + /// + /// # Note + /// + /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the + /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because + /// for properly dropping the data we also need to clear `data` control bytes. If we + /// drop data, but do not clear `data control byte` it leads to double drop when + /// [`RawTable`] goes out of scope. + /// + /// If you modify an already initialized `value`, so [`Hash`] and [`Eq`] on the new + /// `T` value and its borrowed form *must* match those for the old `T` value, as the map + /// will not re-evaluate where the new value should go, meaning the value may become + /// "lost" if their location does not reflect their state. + /// + /// [`RawTable`]: crate::raw::RawTable + /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + #[inline] + pub fn as_ptr(&self) -> *mut T { + if T::IS_ZERO_SIZED { + // Just return an arbitrary ZST pointer which is properly aligned + // invalid pointer is good enough for ZST + invalid_mut(mem::align_of::()) + } else { + unsafe { self.ptr.as_ptr().sub(1) } + } + } + + /// Acquires the underlying non-null pointer `*mut T` to `data`. + #[inline] + fn as_non_null(&self) -> NonNull { + // SAFETY: `self.ptr` is already a `NonNull` + unsafe { NonNull::new_unchecked(self.as_ptr()) } + } + + /// Create a new [`Bucket`] that is offset from the `self` by the given + /// `offset`. The pointer calculation is performed by calculating the + /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`). + /// This function is used for iterators. + /// + /// `offset` is in units of `T`; e.g., a `offset` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety + /// rules of [`NonNull::new_unchecked`] function. + /// + /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method + /// and [`NonNull::new_unchecked`] function, as well as for the correct + /// logic of the work of this crate, the following rules are necessary and + /// sufficient: + /// + /// * `self` contained pointer must not be `dangling`; + /// + /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`, + /// i.e. 
`(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other + /// words, `self.to_base_index() + offset + 1` must be no greater than the number returned + /// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the + /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`, + /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words, + /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the + /// function [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// [`Bucket`]: crate::raw::Bucket + /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1 + /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked + /// [`RawTable::buckets`]: crate::raw::RawTable::buckets + /// [`RawTableInner::buckets`]: RawTableInner::buckets + #[inline] + unsafe fn next_n(&self, offset: usize) -> Self { + let ptr = if T::IS_ZERO_SIZED { + // invalid pointer is good enough for ZST + invalid_mut(self.ptr.as_ptr() as usize + offset) + } else { + self.ptr.as_ptr().sub(offset) + }; + Self { + ptr: NonNull::new_unchecked(ptr), + } + } + + /// Executes the destructor (if any) of the pointed-to `data`. + /// + /// # Safety + /// + /// See [`ptr::drop_in_place`] for safety concerns. + /// + /// You should use [`RawTable::erase`] instead of this function, + /// or be careful with calling this function directly, because for + /// properly dropping the data we need also clear `data` control bytes. + /// If we drop data, but do not erase `data control byte` it leads to + /// double drop when [`RawTable`] goes out of scope. + /// + /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html + /// [`RawTable`]: crate::raw::RawTable + /// [`RawTable::erase`]: crate::raw::RawTable::erase + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) unsafe fn drop(&self) { + self.as_ptr().drop_in_place(); + } + + /// Reads the `value` from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// # Safety + /// + /// See [`ptr::read`] for safety concerns. + /// + /// You should use [`RawTable::remove`] instead of this function, + /// or be careful with calling this function directly, because compiler + /// calls its destructor when the read `value` goes out of scope. It + /// can cause double dropping when [`RawTable`] goes out of scope, + /// because of not erased `data control byte`. + /// + /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html + /// [`RawTable`]: crate::raw::RawTable + /// [`RawTable::remove`]: crate::raw::RawTable::remove + #[inline] + pub(crate) unsafe fn read(&self) -> T { + self.as_ptr().read() + } + + /// Overwrites a memory location with the given `value` without reading + /// or dropping the old value (like [`ptr::write`] function). + /// + /// # Safety + /// + /// See [`ptr::write`] for safety concerns. + /// + /// # Note + /// + /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match + /// those for the old `T` value, as the map will not re-evaluate where the new + /// value should go, meaning the value may become "lost" if their location + /// does not reflect their state. 
+ /// + /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + #[inline] + pub(crate) unsafe fn write(&self, val: T) { + self.as_ptr().write(val); + } + + /// Returns a shared immutable reference to the `value`. + /// + /// # Safety + /// + /// See [`NonNull::as_ref`] for safety concerns. + /// + /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref + #[inline] + pub unsafe fn as_ref<'a>(&self) -> &'a T { + &*self.as_ptr() + } + + /// Returns a unique mutable reference to the `value`. + /// + /// # Safety + /// + /// See [`NonNull::as_mut`] for safety concerns. + /// + /// # Note + /// + /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match + /// those for the old `T` value, as the map will not re-evaluate where the new + /// value should go, meaning the value may become "lost" if their location + /// does not reflect their state. + /// + /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + #[inline] + pub unsafe fn as_mut<'a>(&self) -> &'a mut T { + &mut *self.as_ptr() + } +} + +/// A raw hash table with an unsafe API. +pub struct RawTable { + table: RawTableInner, + alloc: A, + // Tell dropck that we own instances of T. + marker: PhantomData, +} + +/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless +/// of how many different key-value types are used. +struct RawTableInner { + // Mask to get an index from a hash value. The value is one less than the + // number of buckets in the table. + bucket_mask: usize, + + // [Padding], T_n, ..., T1, T0, C0, C1, ... + // ^ points here + ctrl: NonNull, + + // Number of elements that can be inserted before we need to grow the table + growth_left: usize, + + // Number of elements in the table, only really used by len() + items: usize, +} + +impl RawTable { + /// Creates a new empty hash table without allocating any memory. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never written to + /// due to our load factor forcing us to always have at least 1 free bucket. + #[inline] + pub const fn new() -> Self { + Self { + table: RawTableInner::NEW, + alloc: Global, + marker: PhantomData, + } + } + + /// Allocates a new hash table with at least enough capacity for inserting + /// the given number of elements without reallocating. + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_in(capacity, Global) + } +} + +impl RawTable { + const TABLE_LAYOUT: TableLayout = TableLayout::new::(); + + /// Creates a new empty hash table without allocating any memory, using the + /// given allocator. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never written to + /// due to our load factor forcing us to always have at least 1 free bucket. + #[inline] + pub const fn new_in(alloc: A) -> Self { + Self { + table: RawTableInner::NEW, + alloc, + marker: PhantomData, + } + } + + /// Allocates a new hash table with the given number of buckets. + /// + /// The control bytes are left uninitialized. 
+ #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new_uninitialized( + alloc: A, + buckets: usize, + fallibility: Fallibility, + ) -> Result { + debug_assert!(buckets.is_power_of_two()); + + Ok(Self { + table: RawTableInner::new_uninitialized( + &alloc, + Self::TABLE_LAYOUT, + buckets, + fallibility, + )?, + alloc, + marker: PhantomData, + }) + } + + /// Allocates a new hash table using the given allocator, with at least enough capacity for + /// inserting the given number of elements without reallocating. + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Self { + table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity), + alloc, + marker: PhantomData, + } + } + + /// Returns a reference to the underlying allocator. + #[inline] + pub fn allocator(&self) -> &A { + &self.alloc + } + + /// Returns pointer to one past last `data` element in the table as viewed from + /// the start point of the allocation. + /// + /// The caller must ensure that the `RawTable` outlives the returned [`NonNull`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + pub fn data_end(&self) -> NonNull { + // `self.table.ctrl.cast()` returns pointer that + // points here (to the end of `T0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. `RawTable::buckets() - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`. + // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search + // with loading `Group` bytes from the heap works properly, even if the result + // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also + // `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + self.table.ctrl.cast() + } + + /// Returns pointer to start of data table. + #[inline] + #[cfg(feature = "nightly")] + pub unsafe fn data_start(&self) -> NonNull { + NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets())) + } + + /// Returns the total amount of memory allocated internally by the hash + /// table, in bytes. + /// + /// The returned number is informational only. It is intended to be + /// primarily used for memory profiling. + #[inline] + pub fn allocation_size(&self) -> usize { + // SAFETY: We use the same `table_layout` that was used to allocate + // this table. + unsafe { self.table.allocation_size_or_zero(Self::TABLE_LAYOUT) } + } + + /// Returns the index of a bucket from a `Bucket`. + #[inline] + pub unsafe fn bucket_index(&self, bucket: &Bucket) -> usize { + bucket.to_base_index(self.data_end()) + } + + /// Returns a pointer to an element in the table. + /// + /// The caller must ensure that the `RawTable` outlives the returned [`Bucket`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the caller of this function must observe the + /// following safety rules: + /// + /// * The table must already be allocated; + /// + /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`] + /// function, i.e. `(index + 1) <= self.buckets()`. 
+ /// + /// It is safe to call this function with index of zero (`index == 0`) on a table that has + /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the `index` must + /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e. + /// `(index + 1) <= self.buckets()`. + /// + /// [`RawTable::buckets`]: RawTable::buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + pub unsafe fn bucket(&self, index: usize) -> Bucket { + // If mem::size_of::() != 0 then return a pointer to the `element` in the `data part` of the table + // (we start counting from "0", so that in the expression T[n], the "n" index actually one less than + // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"): + // + // `table.bucket(3).as_ptr()` returns a pointer that points here in the `data` + // part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`) + // | + // | `base = self.data_end()` points here + // | (to the start of CT0 or to the end of T0) + // v v + // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + // ^ \__________ __________/ + // `table.bucket(3)` returns a pointer that points \/ + // here in the `data` part of the `RawTable` (to additional control bytes + // the end of T3) `m = Group::WIDTH - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`; + // CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + // the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask` + // is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`. + debug_assert_ne!(self.table.bucket_mask, 0); + debug_assert!(index < self.buckets()); + Bucket::from_base_index(self.data_end(), index) + } + + /// Erases an element from the table without dropping it. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn erase_no_drop(&mut self, item: &Bucket) { + let index = self.bucket_index(item); + self.table.erase(index); + } + + /// Erases an element from the table, dropping it in place. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::needless_pass_by_value)] + pub unsafe fn erase(&mut self, item: Bucket) { + // Erase the element from the table first since drop might panic. + self.erase_no_drop(&item); + item.drop(); + } + + /// Removes an element from the table, returning it. + /// + /// This also returns an `InsertSlot` pointing to the newly free bucket. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::needless_pass_by_value)] + pub unsafe fn remove(&mut self, item: Bucket) -> (T, InsertSlot) { + self.erase_no_drop(&item); + ( + item.read(), + InsertSlot { + index: self.bucket_index(&item), + }, + ) + } + + /// Finds and removes an element from the table, returning it. + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.find(hash, eq) { + Some(bucket) => Some(unsafe { self.remove(bucket).0 }), + None => None, + } + } + + /// Marks all table buckets as empty without dropping their contents. + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear_no_drop(&mut self) { + self.table.clear_no_drop(); + } + + /// Removes all elements from the table without freeing the backing memory. + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + if self.is_empty() { + // Special case empty table to avoid surprising O(capacity) time. + return; + } + // Ensure that the table is reset even if one of the drops panic + let mut self_ = guard(self, |self_| self_.clear_no_drop()); + unsafe { + // SAFETY: ScopeGuard sets to zero the `items` field of the table + // even in case of panic during the dropping of the elements so + // that there will be no double drop of the elements. + self_.table.drop_elements::(); + } + } + + /// Shrinks the table to fit `max(self.len(), min_size)` elements. + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) { + // Calculate the minimal number of elements that we need to reserve + // space for. + let min_size = usize::max(self.table.items, min_size); + if min_size == 0 { + let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } + return; + } + + // Calculate the number of buckets that we need for this number of + // elements. If the calculation overflows then the requested bucket + // count must be larger than what we have right and nothing needs to be + // done. + let min_buckets = match capacity_to_buckets(min_size) { + Some(buckets) => buckets, + None => return, + }; + + // If we have more buckets than we need, shrink the table. + if min_buckets < self.buckets() { + // Fast path if the table is empty + if self.table.items == 0 { + let new_inner = + RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size); + let mut old_inner = mem::replace(&mut self.table, new_inner); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } + } else { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + unsafe { + // SAFETY: + // 1. We know for sure that `min_size >= self.table.items`. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose RawTable::new_uninitialized in a public API. + if self + .resize(min_size, hasher, Fallibility::Infallible) + .is_err() + { + // SAFETY: The result of calling the `resize` function cannot be an error + // because `fallibility == Fallibility::Infallible. 
+ hint::unreachable_unchecked() + } + } + } + } + } + + /// Ensures that at least `additional` items can be inserted into the table + /// without reallocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { + if unlikely(additional > self.table.growth_left) { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + unsafe { + // SAFETY: The [`RawTableInner`] must already have properly initialized control + // bytes since we will never expose RawTable::new_uninitialized in a public API. + if self + .reserve_rehash(additional, hasher, Fallibility::Infallible) + .is_err() + { + // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`. + hint::unreachable_unchecked() + } + } + } + } + + /// Tries to ensure that at least `additional` items can be inserted into + /// the table without reallocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + ) -> Result<(), TryReserveError> { + if additional > self.table.growth_left { + // SAFETY: The [`RawTableInner`] must already have properly initialized control + // bytes since we will never expose RawTable::new_uninitialized in a public API. + unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) } + } else { + Ok(()) + } + } + + /// Out-of-line slow path for `reserve` and `try_reserve`. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes, + /// otherwise calling this function results in [`undefined behavior`] + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[cold] + #[inline(never)] + unsafe fn reserve_rehash( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + fallibility: Fallibility, + ) -> Result<(), TryReserveError> { + unsafe { + // SAFETY: + // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 2. The `drop` function is the actual drop function of the elements stored in + // the table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. + self.table.reserve_rehash_inner( + &self.alloc, + additional, + &|table, index| hasher(table.bucket::(index).as_ref()), + fallibility, + Self::TABLE_LAYOUT, + if T::NEEDS_DROP { + Some(|ptr| ptr::drop_in_place(ptr as *mut T)) + } else { + None + }, + ) + } + } + + /// Allocates a new table of a different size and moves the contents of the + /// current table into it. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes, + /// otherwise calling this function results in [`undefined behavior`] + /// + /// The caller of this function must ensure that `capacity >= self.table.items` + /// otherwise: + /// + /// * If `self.table.items != 0`, calling of this function with `capacity` + /// equal to 0 (`capacity == 0`) results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and + /// `self.table.items > capacity_to_buckets(capacity)` + /// calling this function results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and + /// `self.table.items > capacity_to_buckets(capacity)` + /// calling this function are never return (will go into an + /// infinite loop). 
+ /// + /// See [`RawTableInner::find_insert_slot`] for more information. + /// + /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + unsafe fn resize( + &mut self, + capacity: usize, + hasher: impl Fn(&T) -> u64, + fallibility: Fallibility, + ) -> Result<(), TryReserveError> { + // SAFETY: + // 1. The caller of this function guarantees that `capacity >= self.table.items`. + // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. + self.table.resize_inner( + &self.alloc, + capacity, + &|table, index| hasher(table.bucket::(index).as_ref()), + fallibility, + Self::TABLE_LAYOUT, + ) + } + + /// Inserts a new element into the table, and returns its raw bucket. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { + unsafe { + // SAFETY: + // 1. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose `RawTable::new_uninitialized` in a public API. + // + // 2. We reserve additional space (if necessary) right after calling this function. + let mut slot = self.table.find_insert_slot(hash); + + // We can avoid growing the table once we have reached our load factor if we are replacing + // a tombstone. This works since the number of EMPTY slots does not change in this case. + // + // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index + // in the range `0..=self.buckets()`. + let old_ctrl = *self.table.ctrl(slot.index); + if unlikely(self.table.growth_left == 0 && old_ctrl.special_is_empty()) { + self.reserve(1, hasher); + // SAFETY: We know for sure that `RawTableInner` has control bytes + // initialized and that there is extra space in the table. + slot = self.table.find_insert_slot(hash); + } + + self.insert_in_slot(hash, slot, value) + } + } + + /// Inserts a new element into the table, and returns a mutable reference to it. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T { + unsafe { self.insert(hash, value, hasher).as_mut() } + } + + /// Inserts a new element into the table, without growing the table. + /// + /// There must be enough space in the table to insert the new element. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "rustc-internal-api")] + pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket { + let (index, old_ctrl) = self.table.prepare_insert_slot(hash); + let bucket = self.table.bucket(index); + + // If we are replacing a DELETED entry then we don't need to update + // the load counter. + self.table.growth_left -= old_ctrl.special_is_empty() as usize; + + bucket.write(value); + self.table.items += 1; + bucket + } + + /// Temporary removes a bucket, applying the given function to the removed + /// element and optionally put back the returned value in the same bucket. 
+ /// + /// Returns `true` if the bucket still contains an element + /// + /// This does not check if the given bucket is actually occupied. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn replace_bucket_with(&mut self, bucket: Bucket, f: F) -> bool + where + F: FnOnce(T) -> Option, + { + let index = self.bucket_index(&bucket); + let old_ctrl = *self.table.ctrl(index); + debug_assert!(self.is_bucket_full(index)); + let old_growth_left = self.table.growth_left; + let item = self.remove(bucket).0; + if let Some(new_item) = f(item) { + self.table.growth_left = old_growth_left; + self.table.set_ctrl(index, old_ctrl); + self.table.items += 1; + self.bucket(index).write(new_item); + true + } else { + false + } + } + + /// Searches for an element in the table. If the element is not found, + /// returns `Err` with the position of a slot where an element with the + /// same hash could be inserted. + /// + /// This function may resize the table if additional space is required for + /// inserting an element. + #[inline] + pub fn find_or_find_insert_slot( + &mut self, + hash: u64, + mut eq: impl FnMut(&T) -> bool, + hasher: impl Fn(&T) -> u64, + ) -> Result, InsertSlot> { + self.reserve(1, hasher); + + unsafe { + // SAFETY: + // 1. We know for sure that there is at least one empty `bucket` in the table. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will + // never expose `RawTable::new_uninitialized` in a public API. + // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket, + // which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in + // the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe. + match self + .table + .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref())) + { + // SAFETY: See explanation above. + Ok(index) => Ok(self.bucket(index)), + Err(slot) => Err(slot), + } + } + } + + /// Inserts a new element into the table in the given slot, and returns its + /// raw bucket. + /// + /// # Safety + /// + /// `slot` must point to a slot previously returned by + /// `find_or_find_insert_slot`, and no mutation of the table must have + /// occurred since that call. + #[inline] + pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket { + let old_ctrl = *self.table.ctrl(slot.index); + self.table.record_item_insert_at(slot.index, old_ctrl, hash); + + let bucket = self.bucket(slot.index); + bucket.write(value); + bucket + } + + /// Searches for an element in the table. + #[inline] + pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { + unsafe { + // SAFETY: + // 1. The [`RawTableInner`] must already have properly initialized control bytes since we + // will never expose `RawTable::new_uninitialized` in a public API. + // 1. The `find_inner` function returns the `index` of only the full bucket, which is in + // the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref` + // is safe. + let result = self + .table + .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref())); + + // Avoid `Option::map` because it bloats LLVM IR. + match result { + // SAFETY: See explanation above. + Some(index) => Some(self.bucket(index)), + None => None, + } + } + } + + /// Gets a reference to an element in the table. + #[inline] + pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.find(hash, eq) { + Some(bucket) => Some(unsafe { bucket.as_ref() }), + None => None, + } + } + + /// Gets a mutable reference to an element in the table. + #[inline] + pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.find(hash, eq) { + Some(bucket) => Some(unsafe { bucket.as_mut() }), + None => None, + } + } + + /// Attempts to get mutable references to `N` entries in the table at once. + /// + /// Returns an array of length `N` with the results of each query. + /// + /// At most one mutable reference will be returned to any entry. `None` will be returned if any + /// of the hashes are duplicates. `None` will be returned if the hash is not found. + /// + /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to + /// the `i`th key to be looked up. + pub fn get_many_mut( + &mut self, + hashes: [u64; N], + eq: impl FnMut(usize, &T) -> bool, + ) -> [Option<&'_ mut T>; N] { + unsafe { + let ptrs = self.get_many_mut_pointers(hashes, eq); + + for (i, cur) in ptrs.iter().enumerate() { + if cur.is_some() && ptrs[..i].contains(cur) { + panic!("duplicate keys found"); + } + } + // All bucket are distinct from all previous buckets so we're clear to return the result + // of the lookup. + + ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut())) + } + } + + pub unsafe fn get_many_unchecked_mut( + &mut self, + hashes: [u64; N], + eq: impl FnMut(usize, &T) -> bool, + ) -> [Option<&'_ mut T>; N] { + let ptrs = self.get_many_mut_pointers(hashes, eq); + ptrs.map(|ptr| ptr.map(|mut ptr| ptr.as_mut())) + } + + unsafe fn get_many_mut_pointers( + &mut self, + hashes: [u64; N], + mut eq: impl FnMut(usize, &T) -> bool, + ) -> [Option>; N] { + array::from_fn(|i| { + self.find(hashes[i], |k| eq(i, k)) + .map(|cur| cur.as_non_null()) + }) + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// This number is a lower bound; the table might be able to hold + /// more, but is guaranteed to be able to hold at least this many. + #[inline] + pub fn capacity(&self) -> usize { + self.table.items + self.table.growth_left + } + + /// Returns the number of elements in the table. + #[inline] + pub fn len(&self) -> usize { + self.table.items + } + + /// Returns `true` if the table contains no elements. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of buckets in the table. + #[inline] + pub fn buckets(&self) -> usize { + self.table.bucket_mask + 1 + } + + /// Checks whether the bucket at `index` is full. + /// + /// # Safety + /// + /// The caller must ensure `index` is less than the number of buckets. + #[inline] + pub unsafe fn is_bucket_full(&self, index: usize) -> bool { + self.table.is_bucket_full(index) + } + + /// Returns an iterator over every element in the table. It is up to + /// the caller to ensure that the `RawTable` outlives the `RawIter`. + /// Because we cannot make the `next` method unsafe on the `RawIter` + /// struct, we have to make the `iter` method unsafe. + #[inline] + pub unsafe fn iter(&self) -> RawIter { + // SAFETY: + // 1. The caller must uphold the safety contract for `iter` method. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose RawTable::new_uninitialized in a public API. + self.table.iter() + } + + /// Returns an iterator over occupied buckets that could match a given hash. 
+ /// + /// `RawTable` only stores 7 bits of the hash value, so this iterator may + /// return items that have a hash value different than the one provided. You + /// should always validate the returned values before using them. + /// + /// It is up to the caller to ensure that the `RawTable` outlives the + /// `RawIterHash`. Because we cannot make the `next` method unsafe on the + /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash { + RawIterHash::new(self, hash) + } + + /// Returns an iterator which removes all elements from the table without + /// freeing the memory. + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> RawDrain<'_, T, A> { + unsafe { + let iter = self.iter(); + self.drain_iter_from(iter) + } + } + + /// Returns an iterator which removes all elements from the table without + /// freeing the memory. + /// + /// Iteration starts at the provided iterator's current location. + /// + /// It is up to the caller to ensure that the iterator is valid for this + /// `RawTable` and covers all items that remain in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn drain_iter_from(&mut self, iter: RawIter) -> RawDrain<'_, T, A> { + debug_assert_eq!(iter.len(), self.len()); + RawDrain { + iter, + table: mem::replace(&mut self.table, RawTableInner::NEW), + orig_table: NonNull::from(&mut self.table), + marker: PhantomData, + } + } + + /// Returns an iterator which consumes all elements from the table. + /// + /// Iteration starts at the provided iterator's current location. + /// + /// It is up to the caller to ensure that the iterator is valid for this + /// `RawTable` and covers all items that remain in the table. + pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { + debug_assert_eq!(iter.len(), self.len()); + + let allocation = self.into_allocation(); + RawIntoIter { + iter, + allocation, + marker: PhantomData, + } + } + + /// Converts the table into a raw allocation. The contents of the table + /// should be dropped using a `RawIter` before freeing the allocation. + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout, A)> { + let alloc = if self.table.is_empty_singleton() { + None + } else { + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = + match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) { + Some(lco) => lco, + None => unsafe { hint::unreachable_unchecked() }, + }; + Some(( + unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset).cast()) }, + layout, + unsafe { ptr::read(&self.alloc) }, + )) + }; + mem::forget(self); + alloc + } +} + +unsafe impl Send for RawTable +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawTable +where + T: Sync, + A: Sync, +{ +} + +impl RawTableInner { + const NEW: Self = RawTableInner::new(); + + /// Creates a new empty hash table without allocating any memory. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never accessed + /// due to our load factor forcing us to always have at least 1 free bucket. + #[inline] + const fn new() -> Self { + Self { + // Be careful to cast the entire slice to a raw pointer. 
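+ // `Group::static_empty()` is a shared static block of `Group::WIDTH` EMPTY control
+ // bytes, so lookups on this unallocated table only ever observe empty tags and the
+ // dangling data pointer is never dereferenced.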
+ ctrl: unsafe { + NonNull::new_unchecked(Group::static_empty().as_ptr().cast_mut().cast()) + }, + bucket_mask: 0, + items: 0, + growth_left: 0, + } + } +} + +impl RawTableInner { + /// Allocates a new [`RawTableInner`] with the given number of buckets. + /// The control bytes and buckets are left uninitialized. + /// + /// # Safety + /// + /// The caller of this function must ensure that the `buckets` is power of two + /// and also initialize all control bytes of the length `self.bucket_mask + 1 + + /// Group::WIDTH` with the [`Tag::EMPTY`] bytes. + /// + /// See also [`Allocator`] API for other safety concerns. + /// + /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new_uninitialized( + alloc: &A, + table_layout: TableLayout, + buckets: usize, + fallibility: Fallibility, + ) -> Result + where + A: Allocator, + { + debug_assert!(buckets.is_power_of_two()); + + // Avoid `Option::ok_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) { + Some(lco) => lco, + None => return Err(fallibility.capacity_overflow()), + }; + + let ptr: NonNull = match do_alloc(alloc, layout) { + Ok(block) => block.cast(), + Err(_) => return Err(fallibility.alloc_err(layout)), + }; + + // SAFETY: null pointer will be caught in above check + let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)); + Ok(Self { + ctrl, + bucket_mask: buckets - 1, + items: 0, + growth_left: bucket_mask_to_capacity(buckets - 1), + }) + } + + /// Attempts to allocate a new [`RawTableInner`] with at least enough + /// capacity for inserting the given number of elements without reallocating. + /// + /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes. + #[inline] + fn fallible_with_capacity( + alloc: &A, + table_layout: TableLayout, + capacity: usize, + fallibility: Fallibility, + ) -> Result + where + A: Allocator, + { + if capacity == 0 { + Ok(Self::NEW) + } else { + // SAFETY: We checked that we could successfully allocate the new table, and then + // initialized all control bytes with the constant `Tag::EMPTY` byte. + unsafe { + let buckets = + capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?; + + let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?; + // SAFETY: We checked that the table is allocated and therefore the table already has + // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) + // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. + result + .ctrl(0) + .write_bytes(Tag::EMPTY.0, result.num_ctrl_bytes()); + + Ok(result) + } + } + } + + /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting + /// the given number of elements without reallocating. + /// + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to + /// handle memory allocation failure. + /// + /// All the control bytes are initialized with the [`Tag::EMPTY`] bytes. + /// + /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + fn with_capacity(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self + where + A: Allocator, + { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. 
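+ // With `Fallibility::Infallible`, a capacity overflow panics and an allocation
+ // failure aborts before `fallible_with_capacity` can return, so the `Err` arm below
+ // is unreachable.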
+ match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) { + Ok(table_inner) => table_inner, + // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`. + Err(_) => unsafe { hint::unreachable_unchecked() }, + } + } + + /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method. + /// + /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control + /// bytes outside the range of the table are filled with [`Tag::EMPTY`] entries. These will unfortunately + /// trigger a match of [`RawTableInner::find_insert_slot_in_group`] function. This is because + /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking + /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied. + /// We detect this situation here and perform a second scan starting at the beginning of the table. + /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the + /// trailing control bytes (containing [`Tag::EMPTY`] bytes). + /// + /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an + /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and + /// `Safety`). + /// + /// # Warning + /// + /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than + /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the + /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that + /// index will cause immediate [`undefined behavior`]. + /// + /// # Safety + /// + /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method. + /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work + /// of this crate, the following rules are necessary and sufficient: + /// + /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this + /// function results in [`undefined behavior`]. + /// + /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`] + /// (after the `find_insert_slot_in_group` function, but before insertion into the table). + /// + /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()` + /// (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function). + /// + /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`] + /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the + /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`). + /// + /// [`RawTableInner::ctrl`]: RawTableInner::ctrl + /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot { + // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`. 
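+ // For example (illustrative): with 2 buckets and `Group::WIDTH == 4`, the trailing
+ // control bytes at positions 2 and 3 are always `Tag::EMPTY`, so a match there maps
+ // back (after masking) onto bucket 0 or 1, which may well be full; the reload from
+ // `self.ctrl(0)` below then picks a genuinely empty or deleted slot instead.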
+ if unlikely(self.is_bucket_full(index)) { + debug_assert!(self.bucket_mask < Group::WIDTH); + // SAFETY: + // + // * Since the caller of this function ensures that the control bytes are properly + // initialized and `ptr = self.ctrl(0)` points to the start of the array of control + // bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH` + // and points to the properly initialized control bytes (see also + // `TableLayout::calculate_layout_for` and `ptr::read`); + // + // * Because the caller of this function ensures that the index was provided by the + // `self.find_insert_slot_in_group()` function, so for for tables larger than the + // group width (self.buckets() >= Group::WIDTH), we will never end up in the given + // branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group` + // cannot return a full bucket index. For tables smaller than the group width, calling + // the `unwrap_unchecked` function is also safe, as the trailing control bytes outside + // the range of the table are filled with EMPTY bytes (and we know for sure that there + // is at least one FULL bucket), so this second scan either finds an empty slot (due to + // the load factor) or hits the trailing control bytes (containing EMPTY). + index = Group::load_aligned(self.ctrl(0)) + .match_empty_or_deleted() + .lowest_set_bit() + .unwrap_unchecked(); + } + InsertSlot { index } + } + + /// Finds the position to insert something in a group. + /// + /// **This may have false positives and must be fixed up with `fix_insert_slot` + /// before it's used.** + /// + /// The function is guaranteed to return the index of an empty or deleted [`Bucket`] + /// in the range `0..self.buckets()` (`0..=self.bucket_mask`). + #[inline] + fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option { + let bit = group.match_empty_or_deleted().lowest_set_bit(); + + if likely(bit.is_some()) { + // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask) + } else { + None + } + } + + /// Searches for an element in the table, or a potential slot where that element could + /// be inserted (an empty or deleted [`Bucket`] index). + /// + /// This uses dynamic dispatch to reduce the amount of code generated, but that is + /// eliminated by LLVM optimizations. + /// + /// This function does not make any changes to the `data` part of the table, or any + /// changes to the `items` or `growth_left` field of the table. + /// + /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the + /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function + /// will never return (will go into an infinite loop) for tables larger than the group + /// width, or return an index outside of the table indices range if the table is less + /// than the group width. + /// + /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool` + /// function with only `FULL` buckets' indices and return the `index` of the found + /// element (as `Ok(index)`). 
If the element is not found and there is at least 1 + /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return + /// [`InsertSlot`] with an index in the range `0..self.buckets()`, but in any case, + /// if this function returns [`InsertSlot`], it will contain an index in the range + /// `0..=self.buckets()`. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling + /// this function results in [`undefined behavior`]. + /// + /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is + /// less than the group width and if there was not at least one empty or deleted bucket in + /// the table will cause immediate [`undefined behavior`]. This is because in this case the + /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`] + /// control bytes outside the table range. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn find_or_find_insert_slot_inner( + &self, + hash: u64, + eq: &mut dyn FnMut(usize) -> bool, + ) -> Result { + let mut insert_slot = None; + + let tag_hash = Tag::full(hash); + let mut probe_seq = self.probe_seq(hash); + + loop { + // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. + // + // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` + // of the table due to masking with `self.bucket_mask` and also because the number + // of buckets is a power of two (see `self.probe_seq` function). + // + // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to + // call `Group::load` due to the extended control bytes range, which is + // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control + // byte will never be read for the allocated table); + // + // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will + // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` + // bytes, which is safe (see RawTableInner::new). + let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; + + for bit in group.match_tag(tag_hash) { + let index = (probe_seq.pos + bit) & self.bucket_mask; + + if likely(eq(index)) { + return Ok(index); + } + } + + // We didn't find the element we were looking for in the group, try to get an + // insertion slot from the group if we don't have one yet. + if likely(insert_slot.is_none()) { + insert_slot = self.find_insert_slot_in_group(&group, &probe_seq); + } + + // Only stop the search if the group contains at least one empty element. + // Otherwise, the element that we are looking for might be in a following group. + if likely(group.match_empty().any_bit_set()) { + // We must have found a insert slot by now, since the current group contains at + // least one. For tables smaller than the group width, there will still be an + // empty element in the current (and only) group due to the load factor. + unsafe { + // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. 
+ // + // * We use this function with the slot / index found by `self.find_insert_slot_in_group` + return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked())); + } + } + + probe_seq.move_next(self.bucket_mask); + } + } + + /// Searches for an empty or deleted bucket which is suitable for inserting a new + /// element and sets the hash for that slot. Returns an index of that slot and the + /// old control byte stored in the found index. + /// + /// This function does not check if the given element exists in the table. Also, + /// this function does not check if there is enough space in the table to insert + /// a new element. The caller of the function must make sure that the table has at + /// least 1 empty or deleted `bucket`, otherwise this function will never return + /// (will go into an infinite loop) for tables larger than the group width, or + /// return an index outside of the table indices range if the table is less than + /// the group width. + /// + /// If there is at least 1 empty or deleted `bucket` in the table, the function is + /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case, + /// if this function returns an `index` it will be in the range `0..=self.buckets()`. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the `items` or `growth_left` field of the table. + /// + /// # Safety + /// + /// The safety rules are directly derived from the safety rules for the + /// [`RawTableInner::set_ctrl_hash`] and [`RawTableInner::find_insert_slot`] methods. + /// Thus, in order to uphold the safety contracts for that methods, as well as for + /// the correct logic of the work of this crate, you must observe the following rules + /// when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated and has properly initialized + /// control bytes otherwise calling this function results in [`undefined behavior`]. + /// + /// * The caller of this function must ensure that the "data" parts of the table + /// will have an entry in the returned index (matching the given hash) right + /// after calling this function. + /// + /// Attempt to write data at the `index` returned by this function when the table is + /// less than the group width and if there was not at least one empty or deleted bucket in + /// the table will cause immediate [`undefined behavior`]. This is because in this case the + /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`] + /// control bytes outside the table range. + /// + /// The caller must independently increase the `items` field of the table, and also, + /// if the old control byte was [`Tag::EMPTY`], then decrease the table's `growth_left` + /// field, and do not change it if the old control byte was [`Tag::DELETED`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`RawTableInner::ctrl`]: RawTableInner::ctrl + /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash + /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot + #[inline] + unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, Tag) { + // SAFETY: Caller of this function ensures that the control bytes are properly initialized. 
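+ // Illustrative note: `resize_inner` below relies on exactly this contract — it writes
+ // the moved element at the returned index immediately after this call and fixes up
+ // `items`/`growth_left` once, after its copy loop.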
+ let index: usize = self.find_insert_slot(hash).index; + // SAFETY: + // 1. The `find_insert_slot` function either returns an `index` less than or + // equal to `self.buckets() = self.bucket_mask + 1` of the table, or never + // returns if it cannot find an empty or deleted slot. + // 2. The caller of this function guarantees that the table has already been + // allocated + let old_ctrl = *self.ctrl(index); + self.set_ctrl_hash(index, hash); + (index, old_ctrl) + } + + /// Searches for an empty or deleted bucket which is suitable for inserting + /// a new element, returning the `index` for the new [`Bucket`]. + /// + /// This function does not make any changes to the `data` part of the table, or any + /// changes to the `items` or `growth_left` field of the table. + /// + /// The table must have at least 1 empty or deleted `bucket`, otherwise this function + /// will never return (will go into an infinite loop) for tables larger than the group + /// width, or return an index outside of the table indices range if the table is less + /// than the group width. + /// + /// If there is at least 1 empty or deleted `bucket` in the table, the function is + /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`, + /// but in any case, if this function returns [`InsertSlot`], it will contain an index + /// in the range `0..=self.buckets()`. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling + /// this function results in [`undefined behavior`]. + /// + /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is + /// less than the group width and if there was not at least one empty or deleted bucket in + /// the table will cause immediate [`undefined behavior`]. This is because in this case the + /// function will return `self.bucket_mask + 1` as an index due to the trailing [`Tag::EMPTY`] + /// control bytes outside the table range. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot { + let mut probe_seq = self.probe_seq(hash); + loop { + // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. + // + // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` + // of the table due to masking with `self.bucket_mask` and also because the number + // of buckets is a power of two (see `self.probe_seq` function). + // + // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to + // call `Group::load` due to the extended control bytes range, which is + // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control + // byte will never be read for the allocated table); + // + // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will + // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` + // bytes, which is safe (see RawTableInner::new). + let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; + + let index = self.find_insert_slot_in_group(&group, &probe_seq); + if likely(index.is_some()) { + // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. 
+ // + // * We use this function with the slot / index found by `self.find_insert_slot_in_group` + unsafe { + return self.fix_insert_slot(index.unwrap_unchecked()); + } + } + probe_seq.move_next(self.bucket_mask); + } + } + + /// Searches for an element in a table, returning the `index` of the found element. + /// This uses dynamic dispatch to reduce the amount of code generated, but it is + /// eliminated by LLVM optimizations. + /// + /// This function does not make any changes to the `data` part of the table, or any + /// changes to the `items` or `growth_left` field of the table. + /// + /// The table must have at least 1 empty `bucket`, otherwise, if the + /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, + /// this function will also never return (will go into an infinite loop). + /// + /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool` + /// function with only `FULL` buckets' indices and return the `index` of the found + /// element as `Some(index)`, so the index will always be in the range + /// `0..self.buckets()`. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling + /// this function results in [`undefined behavior`]. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline(always)] + unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option { + let tag_hash = Tag::full(hash); + let mut probe_seq = self.probe_seq(hash); + + loop { + // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. + // + // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` + // of the table due to masking with `self.bucket_mask`. + // + // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to + // call `Group::load` due to the extended control bytes range, which is + // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control + // byte will never be read for the allocated table); + // + // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will + // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` + // bytes, which is safe (see RawTableInner::new_in). + let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; + + for bit in group.match_tag(tag_hash) { + // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + let index = (probe_seq.pos + bit) & self.bucket_mask; + + if likely(eq(index)) { + return Some(index); + } + } + + if likely(group.match_empty().any_bit_set()) { + return None; + } + + probe_seq.move_next(self.bucket_mask); + } + } + + /// Prepares for rehashing data in place (that is, without allocating new memory). + /// Converts all full index `control bytes` to `Tag::DELETED` and all `Tag::DELETED` control + /// bytes to `Tag::EMPTY`, i.e. performs the following conversion: + /// + /// - `Tag::EMPTY` control bytes -> `Tag::EMPTY`; + /// - `Tag::DELETED` control bytes -> `Tag::EMPTY`; + /// - `FULL` control bytes -> `Tag::DELETED`. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the `items` or `growth_left` field of the table. 
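+ ///
+ /// As a small illustration (simplified; each cell stands for one control byte):
+ ///
+ /// ```none
+ /// before: FULL(h2=0x23), Tag::DELETED, Tag::EMPTY, FULL(h2=0x51)
+ /// after:  Tag::DELETED,  Tag::EMPTY,   Tag::EMPTY, Tag::DELETED
+ /// ```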
+ /// + /// # Safety + /// + /// You must observe the following safety rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The caller of this function must convert the `Tag::DELETED` bytes back to `FULL` + /// bytes when re-inserting them into their ideal position (which was impossible + /// to do during the first insert due to tombstones). If the caller does not do + /// this, then calling this function may result in a memory leak. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes otherwise + /// calling this function results in [`undefined behavior`]. + /// + /// Calling this function on a table that has not been allocated results in + /// [`undefined behavior`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[allow(clippy::mut_mut)] + #[inline] + unsafe fn prepare_rehash_in_place(&mut self) { + // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY. + // This effectively frees up all buckets containing a DELETED entry. + // + // SAFETY: + // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`; + // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned` + // due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`; + // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated; + // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0 + // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for). + for i in (0..self.buckets()).step_by(Group::WIDTH) { + let group = Group::load_aligned(self.ctrl(i)); + let group = group.convert_special_to_empty_and_full_to_deleted(); + group.store_aligned(self.ctrl(i)); + } + + // Fix up the trailing control bytes. See the comments in set_ctrl + // for the handling of tables smaller than the group width. + // + // SAFETY: The caller of this function guarantees that [`RawTableInner`] + // has already been allocated + if unlikely(self.buckets() < Group::WIDTH) { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, + // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to + // `Group::WIDTH` is safe + self.ctrl(0) + .copy_to(self.ctrl(Group::WIDTH), self.buckets()); + } else { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of + // control bytes,so copying `Group::WIDTH` bytes with offset equal + // to `self.buckets() == self.bucket_mask + 1` is safe + self.ctrl(0) + .copy_to(self.ctrl(self.buckets()), Group::WIDTH); + } + } + + /// Returns an iterator over every element in the table. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result + /// is [`undefined behavior`]: + /// + /// * The caller has to ensure that the `RawTableInner` outlives the + /// `RawIter`. Because we cannot make the `next` method unsafe on + /// the `RawIter` struct, we have to make the `iter` method unsafe. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. 
+ /// + /// The type `T` must be the actual type of the elements stored in the table, + /// otherwise using the returned [`RawIter`] results in [`undefined behavior`]. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn iter(&self) -> RawIter { + // SAFETY: + // 1. Since the caller of this function ensures that the control bytes + // are properly initialized and `self.data_end()` points to the start + // of the array of control bytes, therefore: `ctrl` is valid for reads, + // properly aligned to `Group::WIDTH` and points to the properly initialized + // control bytes. + // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e. + // equal to zero). + // 3. We pass the exact value of buckets of the table to the function. + // + // `ctrl` points here (to the start + // of the first control byte `CT0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`. + // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search + // with loading `Group` bytes from the heap works properly, even if the result + // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also + // `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + let data = Bucket::from_base_index(self.data_end(), 0); + RawIter { + // SAFETY: See explanation above + iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()), + items: self.items, + } + } + + /// Executes the destructors (if any) of the values stored in the table. + /// + /// # Note + /// + /// This function does not erase the control bytes of the table and does + /// not make any changes to the `items` or `growth_left` fields of the + /// table. If necessary, the caller of this function must manually set + /// up these table fields, for example using the [`clear_no_drop`] function. + /// + /// Be careful during calling this function, because drop function of + /// the elements can panic, and this can leave table in an inconsistent + /// state. + /// + /// # Safety + /// + /// The type `T` must be the actual type of the elements stored in the table, + /// otherwise calling this function may result in [`undefined behavior`]. + /// + /// If `T` is a type that should be dropped and **the table is not empty**, + /// calling this function more than once results in [`undefined behavior`]. + /// + /// If `T` is not [`Copy`], attempting to use values stored in the table after + /// calling this function may result in [`undefined behavior`]. + /// + /// It is safe to call this function on a table that has not been allocated, + /// on a table with uninitialized control bytes, and on a table with no actual + /// data but with `Full` control bytes if `self.items == 0`. + /// + /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information + /// about of properly removing or saving `element` from / into the [`RawTable`] / + /// [`RawTableInner`]. 
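+ ///
+ /// A minimal sketch of a full "clear" sequence (illustrative; assumes the caller has
+ /// access to this module's internals):
+ ///
+ /// ```ignore
+ /// unsafe { self.drop_elements::<T>() };
+ /// // Control bytes and `items`/`growth_left` are untouched; reset them explicitly,
+ /// // for example via `clear_no_drop`.
+ /// self.clear_no_drop();
+ /// ```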
+ /// + /// [`Bucket::drop`]: Bucket::drop + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`clear_no_drop`]: RawTableInner::clear_no_drop + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + unsafe fn drop_elements(&mut self) { + // Check that `self.items != 0`. Protects against the possibility + // of creating an iterator on an table with uninitialized control bytes. + if T::NEEDS_DROP && self.items != 0 { + // SAFETY: We know for sure that RawTableInner will outlive the + // returned `RawIter` iterator, and the caller of this function + // must uphold the safety contract for `drop_elements` method. + for item in self.iter::() { + // SAFETY: The caller must uphold the safety contract for + // `drop_elements` method. + item.drop(); + } + } + } + + /// Executes the destructors (if any) of the values stored in the table and than + /// deallocates the table. + /// + /// # Note + /// + /// Calling this function automatically makes invalid (dangling) all instances of + /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table. + /// + /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left` + /// fields of the table. If necessary, the caller of this function must manually set + /// up these table fields. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`undefined behavior`]: + /// + /// * Calling this function more than once; + /// + /// * The type `T` must be the actual type of the elements stored in the table. + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used + /// to allocate this table. + /// + /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that + /// was used to allocate this table. + /// + /// The caller of this function should pay attention to the possibility of the + /// elements' drop function panicking, because this: + /// + /// * May leave the table in an inconsistent state; + /// + /// * Memory is never deallocated, so a memory leak may occur. + /// + /// Attempt to use the `ctrl` field of the table (dereference) after calling this + /// function results in [`undefined behavior`]. + /// + /// It is safe to call this function on a table that has not been allocated, + /// on a table with uninitialized control bytes, and on a table with no actual + /// data but with `Full` control bytes if `self.items == 0`. + /// + /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`] + /// for more information. + /// + /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements + /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + unsafe fn drop_inner_table(&mut self, alloc: &A, table_layout: TableLayout) { + if !self.is_empty_singleton() { + unsafe { + // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method. + self.drop_elements::(); + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. The caller must uphold the safety contract for `drop_inner_table` method. + self.free_buckets(alloc, table_layout); + } + } + } + + /// Returns a pointer to an element in the table (convenience for + /// `Bucket::from_base_index(self.data_end::(), index)`). 
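+ ///
+ /// For example (illustrative; `index` must refer to a full bucket and `T` must be the
+ /// stored element type, see `Safety` below):
+ ///
+ /// ```ignore
+ /// let bucket = unsafe { self.bucket::<T>(index) };
+ /// let element: &T = unsafe { bucket.as_ref() };
+ /// ```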
+ /// + /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived from the + /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling + /// this function, the following safety rules must be observed: + /// + /// * The table must already be allocated; + /// + /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`] + /// function, i.e. `(index + 1) <= self.buckets()`. + /// + /// * The type `T` must be the actual type of the elements stored in the table, otherwise + /// using the returned [`Bucket`] may result in [`undefined behavior`]. + /// + /// It is safe to call this function with index of zero (`index == 0`) on a table that has + /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the `index` must + /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e. + /// `(index + 1) <= self.buckets()`. + /// + /// ```none + /// If mem::size_of::() != 0 then return a pointer to the `element` in the `data part` of the table + /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than + /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"): + /// + /// `table.bucket(3).as_ptr()` returns a pointer that points here in the `data` + /// part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`]) + /// | + /// | `base = table.data_end::()` points here + /// | (to the start of CT0 or to the end of T0) + /// v v + /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + /// ^ \__________ __________/ + /// `table.bucket(3)` returns a pointer that points \/ + /// here in the `data` part of the `RawTableInner` additional control bytes + /// (to the end of T3) `m = Group::WIDTH - 1` + /// + /// where: T0...T_n - our stored data; + /// CT0...CT_n - control bytes or metadata for `data`; + /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask` + /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function. + /// + /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + /// ``` + /// + /// [`Bucket::from_base_index`]: Bucket::from_base_index + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn bucket(&self, index: usize) -> Bucket { + debug_assert_ne!(self.bucket_mask, 0); + debug_assert!(index < self.buckets()); + Bucket::from_base_index(self.data_end(), index) + } + + /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table + /// (convenience for `self.data_end::().as_ptr().sub((index + 1) * size_of)`). + /// + /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`, + /// otherwise using it may result in [`undefined behavior`]. 
+ /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`undefined behavior`]: + /// + /// * The table must already be allocated; + /// + /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`] + /// function, i.e. `(index + 1) <= self.buckets()`; + /// + /// * The `size_of` must be equal to the size of the elements stored in the table; + /// + /// ```none + /// If mem::size_of::() != 0 then return a pointer to the `element` in the `data part` of the table + /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than + /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"): + /// + /// `table.bucket_ptr(3, mem::size_of::())` returns a pointer that points here in the + /// `data` part of the `RawTableInner`, i.e. to the start of T3 + /// | + /// | `base = table.data_end::()` points here + /// | (to the start of CT0 or to the end of T0) + /// v v + /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + /// \__________ __________/ + /// \/ + /// additional control bytes + /// `m = Group::WIDTH - 1` + /// + /// where: T0...T_n - our stored data; + /// CT0...CT_n - control bytes or metadata for `data`; + /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask` + /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function. + /// + /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + /// ``` + /// + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 { + debug_assert_ne!(self.bucket_mask, 0); + debug_assert!(index < self.buckets()); + let base: *mut u8 = self.data_end().as_ptr(); + base.sub((index + 1) * size_of) + } + + /// Returns pointer to one past last `data` element in the table as viewed from + /// the start point of the allocation (convenience for `self.ctrl.cast()`). + /// + /// This function actually returns a pointer to the end of the `data element` at + /// index "0" (zero). + /// + /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Note + /// + /// The type `T` must be the actual type of the elements stored in the table, otherwise + /// using the returned [`NonNull`] may result in [`undefined behavior`]. + /// + /// ```none + /// `table.data_end::()` returns pointer that points here + /// (to the end of `T0`) + /// ∨ + /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m + /// \________ ________/ + /// \/ + /// `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` + /// + /// where: T0...T_n - our stored data; + /// CT0...CT_n - control bytes or metadata for `data`. + /// CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search + /// with loading `Group` bytes from the heap works properly, even if the result + /// of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also + /// `RawTableInner::set_ctrl` function. + /// + /// P.S. 
`h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + /// ``` + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + fn data_end(&self) -> NonNull { + self.ctrl.cast() + } + + /// Returns an iterator-like object for a probe sequence on the table. + /// + /// This iterator never terminates, but is guaranteed to visit each bucket + /// group exactly once. The loop using `probe_seq` must terminate upon + /// reaching a group containing an empty bucket. + #[inline] + fn probe_seq(&self, hash: u64) -> ProbeSeq { + ProbeSeq { + // This is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + pos: h1(hash) & self.bucket_mask, + stride: 0, + } + } + + #[inline] + unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: Tag, hash: u64) { + self.growth_left -= usize::from(old_ctrl.special_is_empty()); + self.set_ctrl_hash(index, hash); + self.items += 1; + } + + #[inline] + fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool { + let probe_seq_pos = self.probe_seq(hash).pos; + let probe_index = + |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH; + probe_index(i) == probe_index(new_i) + } + + /// Sets a control byte to the hash, and possibly also the replicated control byte at + /// the end of the array. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the `items` or `growth_left` field of the table. + /// + /// # Safety + /// + /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`] + /// method. Thus, in order to uphold the safety contracts for the method, you must observe the + /// following rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must + /// be no greater than the number returned by the function [`RawTableInner::buckets`]. + /// + /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn set_ctrl_hash(&mut self, index: usize, hash: u64) { + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_hash`] + self.set_ctrl(index, Tag::full(hash)); + } + + /// Replaces the hash in the control byte at the given index with the provided one, + /// and possibly also replicates the new control byte at the end of the array of control + /// bytes, returning the old control byte. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the `items` or `growth_left` field of the table. 
+ /// + /// # Safety + /// + /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_hash`] + /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both + /// methods, you must observe the following rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must + /// be no greater than the number returned by the function [`RawTableInner::buckets`]. + /// + /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`RawTableInner::set_ctrl_hash`]: RawTableInner::set_ctrl_hash + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn replace_ctrl_hash(&mut self, index: usize, hash: u64) -> Tag { + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_hash`] + let prev_ctrl = *self.ctrl(index); + self.set_ctrl_hash(index, hash); + prev_ctrl + } + + /// Sets a control byte, and possibly also the replicated control byte at + /// the end of the array. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the `items` or `growth_left` field of the table. + /// + /// # Safety + /// + /// You must observe the following safety rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must + /// be no greater than the number returned by the function [`RawTableInner::buckets`]. + /// + /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn set_ctrl(&mut self, index: usize, ctrl: Tag) { + // Replicate the first Group::WIDTH control bytes at the end of + // the array without using a branch. If the tables smaller than + // the group width (self.buckets() < Group::WIDTH), + // `index2 = Group::WIDTH + index`, otherwise `index2` is: + // + // - If index >= Group::WIDTH then index == index2. + // - Otherwise index2 == self.bucket_mask + 1 + index. + // + // The very last replicated control byte is never actually read because + // we mask the initial index for unaligned loads, but we write it + // anyways because it makes the set_ctrl implementation simpler. + // + // If there are fewer buckets than Group::WIDTH then this code will + // replicate the buckets at the end of the trailing group. 
For example + // with 2 buckets and a group size of 4, the control bytes will look + // like this: + // + // Real | Replicated + // --------------------------------------------- + // | [A] | [B] | [Tag::EMPTY] | [EMPTY] | [A] | [B] | + // --------------------------------------------- + + // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH` + // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH; + + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`] + *self.ctrl(index) = ctrl; + *self.ctrl(index2) = ctrl; + } + + /// Returns a pointer to a control byte. + /// + /// # Safety + /// + /// For the allocated [`RawTableInner`], the result is [`Undefined Behavior`], + /// if the `index` is greater than the `self.bucket_mask + 1 + Group::WIDTH`. + /// In that case, calling this function with `index == self.bucket_mask + 1 + Group::WIDTH` + /// will return a pointer to the end of the allocated table and it is useless on its own. + /// + /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a + /// table that has not been allocated results in [`Undefined Behavior`]. + /// + /// So to satisfy both requirements you should always follow the rule that + /// `index < self.bucket_mask + 1 + Group::WIDTH` + /// + /// Calling this function on [`RawTableInner`] that are not already allocated is safe + /// for read-only purpose. + /// + /// See also [`Bucket::as_ptr()`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`Bucket::as_ptr()`]: Bucket::as_ptr() + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn ctrl(&self, index: usize) -> *mut Tag { + debug_assert!(index < self.num_ctrl_bytes()); + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`] + self.ctrl.as_ptr().add(index).cast() + } + + #[inline] + fn buckets(&self) -> usize { + self.bucket_mask + 1 + } + + /// Checks whether the bucket at `index` is full. + /// + /// # Safety + /// + /// The caller must ensure `index` is less than the number of buckets. + #[inline] + unsafe fn is_bucket_full(&self, index: usize) -> bool { + debug_assert!(index < self.buckets()); + (*self.ctrl(index)).is_full() + } + + #[inline] + fn num_ctrl_bytes(&self) -> usize { + self.bucket_mask + 1 + Group::WIDTH + } + + #[inline] + fn is_empty_singleton(&self) -> bool { + self.bucket_mask == 0 + } + + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating, + /// and return it inside `ScopeGuard` to protect against panic in the hash + /// function. + /// + /// # Note + /// + /// It is recommended (but not required): + /// + /// * That the new table's `capacity` be greater than or equal to `self.items`. + /// + /// * The `alloc` is the same [`Allocator`] as the `Allocator` used + /// to allocate this table. + /// + /// * The `table_layout` is the same [`TableLayout`] as the `TableLayout` used + /// to allocate this table. + /// + /// If `table_layout` does not match the `TableLayout` that was used to allocate + /// this table, then using `mem::swap` with the `self` and the new table returned + /// by this function results in [`undefined behavior`]. 
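+ ///
+ /// A sketch of the intended calling pattern (illustrative; `resize_inner` is the real
+ /// caller in this file):
+ ///
+ /// ```ignore
+ /// let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
+ /// // ... move the elements into `new_table`; if the hasher panics, the guard
+ /// // frees the partially filled new table ...
+ /// mem::swap(self, &mut new_table);
+ /// // On success the guard now holds the old table and frees its buckets on drop.
+ /// ```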
+ /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[allow(clippy::mut_mut)] + #[inline] + fn prepare_resize<'a, A>( + &self, + alloc: &'a A, + table_layout: TableLayout, + capacity: usize, + fallibility: Fallibility, + ) -> Result, TryReserveError> + where + A: Allocator, + { + debug_assert!(self.items <= capacity); + + // Allocate and initialize the new table. + let new_table = + RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?; + + // The hash function may panic, in which case we simply free the new + // table without dropping any elements that may have been copied into + // it. + // + // This guard is also used to free the old table on success, see + // the comment at the bottom of this function. + Ok(guard(new_table, move |self_| { + if !self_.is_empty_singleton() { + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. We know for sure that the `alloc` and `table_layout` matches the + // [`Allocator`] and [`TableLayout`] used to allocate this table. + unsafe { self_.free_buckets(alloc, table_layout) }; + } + })) + } + + /// Reserves or rehashes to make room for `additional` more elements. + /// + /// This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations when inlined. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`undefined behavior`]: + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used + /// to allocate this table. + /// + /// * The `layout` must be the same [`TableLayout`] as the `TableLayout` + /// used to allocate this table. + /// + /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of + /// the elements stored in the table. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[allow(clippy::inline_always)] + #[inline(always)] + unsafe fn reserve_rehash_inner( + &mut self, + alloc: &A, + additional: usize, + hasher: &dyn Fn(&mut Self, usize) -> u64, + fallibility: Fallibility, + layout: TableLayout, + drop: Option, + ) -> Result<(), TryReserveError> + where + A: Allocator, + { + // Avoid `Option::ok_or_else` because it bloats LLVM IR. + let new_items = match self.items.checked_add(additional) { + Some(new_items) => new_items, + None => return Err(fallibility.capacity_overflow()), + }; + let full_capacity = bucket_mask_to_capacity(self.bucket_mask); + if new_items <= full_capacity / 2 { + // Rehash in-place without re-allocating if we have plenty of spare + // capacity that is locked up due to DELETED entries. + + // SAFETY: + // 1. We know for sure that `[`RawTableInner`]` has already been allocated + // (since new_items <= full_capacity / 2); + // 2. The caller ensures that `drop` function is the actual drop function of + // the elements stored in the table. + // 3. The caller ensures that `layout` matches the [`TableLayout`] that was + // used to allocate this table. + // 4. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. + self.rehash_in_place(hasher, layout.size, drop); + Ok(()) + } else { + // Otherwise, conservatively resize to at least the next size up + // to avoid churning deletes into frequent rehashes. + // + // SAFETY: + // 1. We know for sure that `capacity >= self.items`. + // 2. 
The caller ensures that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. + self.resize_inner( + alloc, + usize::max(new_items, full_capacity + 1), + hasher, + fallibility, + layout, + ) + } + } + + /// Returns an iterator over full buckets indices in the table. + /// + /// # Safety + /// + /// Behavior is undefined if any of the following conditions are violated: + /// + /// * The caller has to ensure that the `RawTableInner` outlives the + /// `FullBucketsIndices`. Because we cannot make the `next` method + /// unsafe on the `FullBucketsIndices` struct, we have to make the + /// `full_buckets_indices` method unsafe. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + #[inline(always)] + unsafe fn full_buckets_indices(&self) -> FullBucketsIndices { + // SAFETY: + // 1. Since the caller of this function ensures that the control bytes + // are properly initialized and `self.ctrl(0)` points to the start + // of the array of control bytes, therefore: `ctrl` is valid for reads, + // properly aligned to `Group::WIDTH` and points to the properly initialized + // control bytes. + // 2. The value of `items` is equal to the amount of data (values) added + // to the table. + // + // `ctrl` points here (to the start + // of the first control byte `CT0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`. + let ctrl = NonNull::new_unchecked(self.ctrl(0).cast::()); + + FullBucketsIndices { + // Load the first group + // SAFETY: See explanation above. + current_group: Group::load_aligned(ctrl.as_ptr().cast()) + .match_full() + .into_iter(), + group_first_index: 0, + ctrl, + items: self.items, + } + } + + /// Allocates a new table of a different size and moves the contents of the + /// current table into it. + /// + /// This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations when inlined. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`undefined behavior`]: + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used + /// to allocate this table; + /// + /// * The `layout` must be the same [`TableLayout`] as the `TableLayout` + /// used to allocate this table; + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// The caller of this function must ensure that `capacity >= self.items` + /// otherwise: + /// + /// * If `self.items != 0`, calling of this function with `capacity == 0` + /// results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and + /// `self.items > capacity_to_buckets(capacity)` calling this function + /// results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and + /// `self.items > capacity_to_buckets(capacity)` calling this function + /// are never return (will go into an infinite loop). + /// + /// Note: It is recommended (but not required) that the new table's `capacity` + /// be greater than or equal to `self.items`. In case if `capacity <= self.items` + /// this function can never return. 
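// Hedged model (illustrative names, not hashbrown's API) of the policy
// `reserve_rehash_inner` applies above: if at least half of the usable capacity is
// tied up in DELETED slots, rehash in place; otherwise grow to at least one past the
// current full capacity so frequent deletes cannot force endless rehash churn.
enum ReservePlan {
    RehashInPlace,
    Resize { new_capacity: usize },
    CapacityOverflow,
}

fn plan_reserve(items: usize, additional: usize, full_capacity: usize) -> ReservePlan {
    let new_items = match items.checked_add(additional) {
        Some(n) => n,
        None => return ReservePlan::CapacityOverflow,
    };
    if new_items <= full_capacity / 2 {
        ReservePlan::RehashInPlace
    } else {
        ReservePlan::Resize {
            new_capacity: usize::max(new_items, full_capacity + 1),
        }
    }
}

fn main() {
    // 3 live items plus 1 more in a table whose full capacity is 28: most of
    // the capacity is tombstoned slack, so an in-place rehash suffices.
    assert!(matches!(plan_reserve(3, 1, 28), ReservePlan::RehashInPlace));
    // 20 live items plus 10 more exceeds half of 28, so the table grows.
    assert!(matches!(
        plan_reserve(20, 10, 28),
        ReservePlan::Resize { new_capacity: 30 }
    ));
}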
See [`RawTableInner::find_insert_slot`] for + /// more information. + /// + /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[allow(clippy::inline_always)] + #[inline(always)] + unsafe fn resize_inner( + &mut self, + alloc: &A, + capacity: usize, + hasher: &dyn Fn(&mut Self, usize) -> u64, + fallibility: Fallibility, + layout: TableLayout, + ) -> Result<(), TryReserveError> + where + A: Allocator, + { + // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`] + // that were used to allocate this table. + let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?; + + // SAFETY: We know for sure that RawTableInner will outlive the + // returned `FullBucketsIndices` iterator, and the caller of this + // function ensures that the control bytes are properly initialized. + for full_byte_index in self.full_buckets_indices() { + // This may panic. + let hash = hasher(self, full_byte_index); + + // SAFETY: + // We can use a simpler version of insert() here since: + // 1. There are no DELETED entries. + // 2. We know there is enough space in the table. + // 3. All elements are unique. + // 4. The caller of this function guarantees that `capacity > 0` + // so `new_table` must already have some allocated memory. + // 5. We set `growth_left` and `items` fields of the new table + // after the loop. + // 6. We insert into the table, at the returned index, the data + // matching the given hash immediately after calling this function. + let (new_index, _) = new_table.prepare_insert_slot(hash); + + // SAFETY: + // + // * `src` is valid for reads of `layout.size` bytes, since the + // table is alive and the `full_byte_index` is guaranteed to be + // within bounds (see `FullBucketsIndices::next_impl`); + // + // * `dst` is valid for writes of `layout.size` bytes, since the + // caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate old table and we have the `new_index` + // returned by `prepare_insert_slot`. + // + // * Both `src` and `dst` are properly aligned. + // + // * Both `src` and `dst` point to different region of memory. + ptr::copy_nonoverlapping( + self.bucket_ptr(full_byte_index, layout.size), + new_table.bucket_ptr(new_index, layout.size), + layout.size, + ); + } + + // The hash function didn't panic, so we can safely set the + // `growth_left` and `items` fields of the new table. + new_table.growth_left -= self.items; + new_table.items = self.items; + + // We successfully copied all elements without panicking. Now replace + // self with the new table. The old table will have its memory freed but + // the items will not be dropped (since they have been moved into the + // new table). + // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate this table. + mem::swap(self, &mut new_table); + + Ok(()) + } + + /// Rehashes the contents of the table in place (i.e. without changing the + /// allocation). + /// + /// If `hasher` panics then some the table's contents may be lost. + /// + /// This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations when inlined. 
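// Hedged illustration (names are invented) of the "dynamic dispatch to reduce the
// amount of code generated" note above: a generic wrapper erases the hasher type
// immediately, so the loop body is compiled once rather than once per
// (element type, hasher) pair, while LLVM can still inline through the `dyn` call
// when the wrapper itself is inlined.
fn checksum<T, H: Fn(&T) -> u64>(items: &[T], hasher: H) -> u64 {
    checksum_dyn(items.len(), &|i| hasher(&items[i]))
}

// Non-generic over the hasher: monomorphized only per element access pattern.
fn checksum_dyn(len: usize, hash_at: &dyn Fn(usize) -> u64) -> u64 {
    (0..len).map(hash_at).fold(0u64, u64::wrapping_add)
}

fn main() {
    let data = [1u64, 2, 3];
    assert_eq!(checksum(&data, |x| *x), 6);
}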
+ /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`undefined behavior`]: + /// + /// * The `size_of` must be equal to the size of the elements stored in the table; + /// + /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of + /// the elements stored in the table. + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[allow(clippy::inline_always)] + #[cfg_attr(feature = "inline-more", inline(always))] + #[cfg_attr(not(feature = "inline-more"), inline)] + unsafe fn rehash_in_place( + &mut self, + hasher: &dyn Fn(&mut Self, usize) -> u64, + size_of: usize, + drop: Option, + ) { + // If the hash function panics then properly clean up any elements + // that we haven't rehashed yet. We unfortunately can't preserve the + // element since we lost their hash and have no way of recovering it + // without risking another panic. + self.prepare_rehash_in_place(); + + let mut guard = guard(self, move |self_| { + if let Some(drop) = drop { + for i in 0..self_.buckets() { + if *self_.ctrl(i) == Tag::DELETED { + self_.set_ctrl(i, Tag::EMPTY); + drop(self_.bucket_ptr(i, size_of)); + self_.items -= 1; + } + } + } + self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items; + }); + + // At this point, DELETED elements are elements that we haven't + // rehashed yet. Find them and re-insert them at their ideal + // position. + 'outer: for i in 0..guard.buckets() { + if *guard.ctrl(i) != Tag::DELETED { + continue; + } + + let i_p = guard.bucket_ptr(i, size_of); + + 'inner: loop { + // Hash the current item + let hash = hasher(*guard, i); + + // Search for a suitable place to put it + // + // SAFETY: Caller of this function ensures that the control bytes + // are properly initialized. + let new_i = guard.find_insert_slot(hash).index; + + // Probing works by scanning through all of the control + // bytes in groups, which may not be aligned to the group + // size. If both the new and old position fall within the + // same unaligned group, then there is no benefit in moving + // it and we can just continue to the next item. + if likely(guard.is_in_same_group(i, new_i, hash)) { + guard.set_ctrl_hash(i, hash); + continue 'outer; + } + + let new_i_p = guard.bucket_ptr(new_i, size_of); + + // We are moving the current item to a new position. Write + // our H2 to the control byte of the new position. + let prev_ctrl = guard.replace_ctrl_hash(new_i, hash); + if prev_ctrl == Tag::EMPTY { + guard.set_ctrl(i, Tag::EMPTY); + // If the target slot is empty, simply move the current + // element into the new slot and clear the old control + // byte. + ptr::copy_nonoverlapping(i_p, new_i_p, size_of); + continue 'outer; + } else { + // If the target slot is occupied, swap the two elements + // and then continue processing the element that we just + // swapped into the old slot. + debug_assert_eq!(prev_ctrl, Tag::DELETED); + ptr::swap_nonoverlapping(i_p, new_i_p, size_of); + continue 'inner; + } + } + } + + guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items; + + mem::forget(guard); + } + + /// Deallocates the table without dropping any entries. + /// + /// # Note + /// + /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements), + /// else it can lead to leaking of memory. 
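// Minimal sketch of the panic-cleanup pattern `rehash_in_place` builds on above (a
// stand-in for the crate's scope-guard helper, not the real one): arm a cleanup
// closure that runs on unwind, and disarm it with `mem::forget` once the fallible
// work, such as calling the user-supplied hasher, has succeeded.
use std::mem;

struct Guard<F: FnMut()>(F);

impl<F: FnMut()> Drop for Guard<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}

fn main() {
    let mut cleanup_ran = false;
    {
        let g = Guard(|| cleanup_ran = true);
        // ... fallible work succeeded ...
        mem::forget(g); // success path: skip the cleanup
    }
    assert!(!cleanup_ran);

    let mut cleanup_ran = false;
    {
        let _g = Guard(|| cleanup_ran = true);
        // dropped here without being disarmed, simulating the unwind path
    }
    assert!(cleanup_ran);
}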
Also calling this function automatically + /// makes invalid (dangling) all instances of buckets ([`Bucket`]) and makes invalid + /// (dangling) the `ctrl` field of the table. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`Undefined Behavior`]: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used + /// to allocate this table. + /// + /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used + /// to allocate this table. + /// + /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. + /// + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc + /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate + #[inline] + unsafe fn free_buckets(&mut self, alloc: &A, table_layout: TableLayout) + where + A: Allocator, + { + // SAFETY: The caller must uphold the safety contract for `free_buckets` + // method. + let (ptr, layout) = self.allocation_info(table_layout); + alloc.deallocate(ptr, layout); + } + + /// Returns a pointer to the allocated memory and the layout that was used to + /// allocate the table. + /// + /// # Safety + /// + /// Caller of this function must observe the following safety rules: + /// + /// * The [`RawTableInner`] has already been allocated, otherwise + /// calling this function results in [`undefined behavior`] + /// + /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` + /// that was used to allocate this table. Failure to comply with this condition + /// may result in [`undefined behavior`]. + /// + /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc + /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate + #[inline] + unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull, Layout) { + debug_assert!( + !self.is_empty_singleton(), + "this function can only be called on non-empty tables" + ); + + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) { + Some(lco) => lco, + None => unsafe { hint::unreachable_unchecked() }, + }; + ( + // SAFETY: The caller must uphold the safety contract for `allocation_info` method. + unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) }, + layout, + ) + } + + /// Returns the total amount of memory allocated internally by the hash + /// table, in bytes. + /// + /// The returned number is informational only. It is intended to be + /// primarily used for memory profiling. + /// + /// # Safety + /// + /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout` + /// that was used to allocate this table. Failure to comply with this condition + /// may result in [`undefined behavior`]. 
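// Hedged sketch of the single-allocation layout whose start `allocation_info` above
// recovers: a data array for `buckets` elements followed by
// `buckets + Group::WIDTH` control bytes aligned to the group width. The real
// `TableLayout::calculate_layout_for` does the equivalent arithmetic by hand; this
// version leans on `Layout::extend` for clarity and is illustrative only.
use std::alloc::Layout;

fn table_layout_for(buckets: usize, elem: Layout, group_width: usize) -> Option<(Layout, usize)> {
    let data = Layout::from_size_align(elem.size().checked_mul(buckets)?, elem.align()).ok()?;
    let ctrl = Layout::from_size_align(buckets.checked_add(group_width)?, group_width).ok()?;
    // `ctrl_offset` is the amount subtracted from the control-byte pointer to get
    // back to the start of the allocation before deallocating.
    let (layout, ctrl_offset) = data.extend(ctrl).ok()?;
    Some((layout, ctrl_offset))
}

fn main() {
    let (layout, ctrl_offset) = table_layout_for(8, Layout::new::<u64>(), 16).unwrap();
    assert_eq!(ctrl_offset, 64); // 8 * 8 bytes of data, already 16-aligned
    assert_eq!(layout.size(), 64 + 8 + 16); // data + per-bucket ctrl bytes + trailing group
}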
+ /// + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn allocation_size_or_zero(&self, table_layout: TableLayout) -> usize { + if self.is_empty_singleton() { + 0 + } else { + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. The caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate this table. + unsafe { self.allocation_info(table_layout).1.size() } + } + } + + /// Marks all table buckets as empty without dropping their contents. + #[inline] + fn clear_no_drop(&mut self) { + if !self.is_empty_singleton() { + unsafe { + self.ctrl(0) + .write_bytes(Tag::EMPTY.0, self.num_ctrl_bytes()); + } + } + self.items = 0; + self.growth_left = bucket_mask_to_capacity(self.bucket_mask); + } + + /// Erases the [`Bucket`]'s control byte at the given index so that it does not + /// triggered as full, decreases the `items` of the table and, if it can be done, + /// increases `self.growth_left`. + /// + /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it + /// does not make any changes to the `data` parts of the table. The caller of this + /// function must take care to properly drop the `data`, otherwise calling this + /// function may result in a memory leak. + /// + /// # Safety + /// + /// You must observe the following safety rules when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * It must be the full control byte at the given position; + /// + /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must + /// be no greater than the number returned by the function [`RawTableInner::buckets`]. + /// + /// Calling this function on a table that has not been allocated results in [`undefined behavior`]. + /// + /// Calling this function on a table with no elements is unspecified, but calling subsequent + /// functions is likely to result in [`undefined behavior`] due to overflow subtraction + /// (`self.items -= 1 cause overflow when self.items == 0`). + /// + /// See also [`Bucket::as_ptr`] method, for more information about of properly removing + /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn erase(&mut self, index: usize) { + debug_assert!(self.is_bucket_full(index)); + + // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because + // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; + // SAFETY: + // - The caller must uphold the safety contract for `erase` method; + // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask` + let empty_before = Group::load(self.ctrl(index_before)).match_empty(); + let empty_after = Group::load(self.ctrl(index)).match_empty(); + + // Inserting and searching in the map is performed by two key functions: + // + // - The `find_insert_slot` function that looks up the index of any `Tag::EMPTY` or `Tag::DELETED` + // slot in a group to be able to insert. 
If it doesn't find an `Tag::EMPTY` or `Tag::DELETED` + // slot immediately in the first group, it jumps to the next `Group` looking for it, + // and so on until it has gone through all the groups in the control bytes. + // + // - The `find_inner` function that looks for the index of the desired element by looking + // at all the `FULL` bytes in the group. If it did not find the element right away, and + // there is no `Tag::EMPTY` byte in the group, then this means that the `find_insert_slot` + // function may have found a suitable slot in the next group. Therefore, `find_inner` + // jumps further, and if it does not find the desired element and again there is no `Tag::EMPTY` + // byte, then it jumps further, and so on. The search stops only if `find_inner` function + // finds the desired element or hits an `Tag::EMPTY` slot/byte. + // + // Accordingly, this leads to two consequences: + // + // - The map must have `Tag::EMPTY` slots (bytes); + // + // - You can't just mark the byte to be erased as `Tag::EMPTY`, because otherwise the `find_inner` + // function may stumble upon an `Tag::EMPTY` byte before finding the desired element and stop + // searching. + // + // Thus it is necessary to check all bytes after and before the erased element. If we are in + // a contiguous `Group` of `FULL` or `Tag::DELETED` bytes (the number of `FULL` or `Tag::DELETED` bytes + // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as + // `Tag::DELETED` in order for the `find_inner` function to go further. On the other hand, if there + // is at least one `Tag::EMPTY` slot in the `Group`, then the `find_inner` function will still stumble + // upon an `Tag::EMPTY` byte, so we can safely mark our erased byte as `Tag::EMPTY` as well. + // + // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index` + // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH) + // cannot have `Tag::DELETED` bytes. + // + // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while + // `trailing_zeros` refers to the bytes at the beginning of a group. + let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH { + Tag::DELETED + } else { + self.growth_left += 1; + Tag::EMPTY + }; + // SAFETY: the caller must uphold the safety contract for `erase` method. + self.set_ctrl(index, ctrl); + self.items -= 1; + } +} + +impl Clone for RawTable { + fn clone(&self) -> Self { + if self.table.is_empty_singleton() { + Self::new_in(self.alloc.clone()) + } else { + unsafe { + // Avoid `Result::ok_or_else` because it bloats LLVM IR. + // + // SAFETY: This is safe as we are taking the size of an already allocated table + // and therefore capacity overflow cannot occur, `self.table.buckets()` is power + // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`. + let mut new_table = match Self::new_uninitialized( + self.alloc.clone(), + self.table.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }; + + // Cloning elements may fail (the clone function may panic). But we don't + // need to worry about uninitialized control bits, since: + // 1. The number of items (elements) in the table is zero, which means that + // the control bits will not be read by Drop function. + // 2. 
The `clone_from_spec` method will first copy all control bits from + // `self` (thus initializing them). But this will not affect the `Drop` + // function, since the `clone_from_spec` function sets `items` only after + // successfully cloning all elements. + new_table.clone_from_spec(self); + new_table + } + } + } + + fn clone_from(&mut self, source: &Self) { + if source.table.is_empty_singleton() { + let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } + } else { + unsafe { + // Make sure that if any panics occurs, we clear the table and + // leave it in an empty state. + let mut self_ = guard(self, |self_| { + self_.clear_no_drop(); + }); + + // First, drop all our elements without clearing the control + // bytes. If this panics then the scope guard will clear the + // table, leaking any elements that were not dropped yet. + // + // This leak is unavoidable: we can't try dropping more elements + // since this could lead to another panic and abort the process. + // + // SAFETY: If something gets wrong we clear our table right after + // dropping the elements, so there is no double drop, since `items` + // will be equal to zero. + self_.table.drop_elements::(); + + // If necessary, resize our table to match the source. + if self_.buckets() != source.buckets() { + let new_inner = match RawTableInner::new_uninitialized( + &self_.alloc, + Self::TABLE_LAYOUT, + source.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }; + // Replace the old inner with new uninitialized one. It's ok, since if something gets + // wrong `ScopeGuard` will initialize all control bytes and leave empty table. + let mut old_inner = mem::replace(&mut self_.table, new_inner); + if !old_inner.is_empty_singleton() { + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. We know for sure that `alloc` and `table_layout` matches + // the [`Allocator`] and [`TableLayout`] that were used to allocate this table. + old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT); + } + } + + // Cloning elements may fail (the clone function may panic), but the `ScopeGuard` + // inside the `clone_from_impl` function will take care of that, dropping all + // cloned elements if necessary. Our `ScopeGuard` will clear the table. + self_.clone_from_spec(source); + + // Disarm the scope guard if cloning was successful. + ScopeGuard::into_inner(self_); + } + } + } +} + +/// Specialization of `clone_from` for `Copy` types +trait RawTableClone { + unsafe fn clone_from_spec(&mut self, source: &Self); +} +impl RawTableClone for RawTable { + default_fn! 
{ + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_spec(&mut self, source: &Self) { + self.clone_from_impl(source); + } + } +} +#[cfg(feature = "nightly")] +impl RawTableClone for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_spec(&mut self, source: &Self) { + source + .table + .ctrl(0) + .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + source + .data_start() + .as_ptr() + .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets()); + + self.table.items = source.table.items; + self.table.growth_left = source.table.growth_left; + } +} + +impl RawTable { + /// Common code for `clone` and `clone_from`. Assumes: + /// - `self.buckets() == source.buckets()`. + /// - Any existing elements have been dropped. + /// - The control bytes are not initialized yet. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_impl(&mut self, source: &Self) { + // Copy the control bytes unchanged. We do this in a single pass + source + .table + .ctrl(0) + .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + + // The cloning of elements may panic, in which case we need + // to make sure we drop only the elements that have been + // cloned so far. + let mut guard = guard((0, &mut *self), |(index, self_)| { + if T::NEEDS_DROP { + for i in 0..*index { + if self_.is_bucket_full(i) { + self_.bucket(i).drop(); + } + } + } + }); + + for from in source.iter() { + let index = source.bucket_index(&from); + let to = guard.1.bucket(index); + to.write(from.as_ref().clone()); + + // Update the index in case we need to unwind. + guard.0 = index + 1; + } + + // Successfully cloned all items, no need to clean up. + mem::forget(guard); + + self.table.items = source.table.items; + self.table.growth_left = source.table.growth_left; + } +} + +impl Default for RawTable { + #[inline] + fn default() -> Self { + Self::new_in(Default::default()) + } +} + +#[cfg(feature = "nightly")] +unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If the drop function of any elements fails, then only a memory leak will occur, + // and we don't care because we are inside the `Drop` function of the `RawTable`, + // so there won't be any table left in an inconsistent state. + self.table + .drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } + } +} +#[cfg(not(feature = "nightly"))] +impl Drop for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If the drop function of any elements fails, then only a memory leak will occur, + // and we don't care because we are inside the `Drop` function of the `RawTable`, + // so there won't be any table left in an inconsistent state. 
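// Hedged sketch of the unwind-safety pattern `clone_from_impl` uses above
// (illustrative helper, not hashbrown's API): clone elements one by one into
// uninitialized storage and, should a clone panic, drop only the prefix that was
// successfully written.
use std::mem::{self, MaybeUninit};

fn clone_into_uninit<T: Clone>(src: &[T], dst: &mut [MaybeUninit<T>]) {
    assert_eq!(src.len(), dst.len());

    struct Guard<'a, T> {
        dst: &'a mut [MaybeUninit<T>],
        initialized: usize,
    }

    impl<T> Drop for Guard<'_, T> {
        fn drop(&mut self) {
            // Only reached on unwind: roll back the clones made so far.
            for slot in &mut self.dst[..self.initialized] {
                unsafe { slot.assume_init_drop() };
            }
        }
    }

    let mut guard = Guard { dst, initialized: 0 };
    for (i, item) in src.iter().enumerate() {
        guard.dst[i].write(item.clone());
        guard.initialized = i + 1;
    }
    mem::forget(guard); // every clone succeeded; nothing to roll back
}

fn main() {
    let src = vec![String::from("a"), String::from("b")];
    let mut dst: Vec<MaybeUninit<String>> = (0..src.len()).map(|_| MaybeUninit::uninit()).collect();
    clone_into_uninit(&src, &mut dst);
    let cloned: Vec<String> = dst.into_iter().map(|s| unsafe { s.assume_init() }).collect();
    assert_eq!(cloned, src);
}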
+ self.table + .drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } + } +} + +impl IntoIterator for RawTable { + type Item = T; + type IntoIter = RawIntoIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> RawIntoIter { + unsafe { + let iter = self.iter(); + self.into_iter_from(iter) + } + } +} + +/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does +/// not track an item count. +pub(crate) struct RawIterRange { + // Mask of full buckets in the current group. Bits are cleared from this + // mask as each element is processed. + current_group: BitMaskIter, + + // Pointer to the buckets for the current group. + data: Bucket, + + // Pointer to the next group of control bytes, + // Must be aligned to the group size. + next_ctrl: *const u8, + + // Pointer one past the last control byte of this range. + end: *const u8, +} + +impl RawIterRange { + /// Returns a `RawIterRange` covering a subset of a table. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`undefined behavior`]: + /// + /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`; + /// + /// * `ctrl` must be properly aligned to the group size (`Group::WIDTH`); + /// + /// * `ctrl` must point to the array of properly initialized control bytes; + /// + /// * `data` must be the [`Bucket`] at the `ctrl` index in the table; + /// + /// * the value of `len` must be less than or equal to the number of table buckets, + /// and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())` + /// must be positive. + /// + /// * The `ctrl.add(len)` pointer must be either in bounds or one + /// byte past the end of the same [allocated table]. + /// + /// * The `len` must be a power of two. + /// + /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new(ctrl: *const u8, data: Bucket, len: usize) -> Self { + debug_assert_ne!(len, 0); + debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); + // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] + let end = ctrl.add(len); + + // Load the first group and advance ctrl to point to the next group + // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] + let current_group = Group::load_aligned(ctrl.cast()).match_full(); + let next_ctrl = ctrl.add(Group::WIDTH); + + Self { + current_group: current_group.into_iter(), + data, + next_ctrl, + end, + } + } + + /// Splits a `RawIterRange` into two halves. + /// + /// Returns `None` if the remaining range is smaller than or equal to the + /// group width. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "rayon")] + pub(crate) fn split(mut self) -> (Self, Option>) { + unsafe { + if self.end <= self.next_ctrl { + // Nothing to split if the group that we are current processing + // is the last one. + (self, None) + } else { + // len is the remaining number of elements after the group that + // we are currently processing. It must be a multiple of the + // group size (small tables are caught by the check above). + let len = offset_from(self.end, self.next_ctrl); + debug_assert_eq!(len % Group::WIDTH, 0); + + // Split the remaining elements into two halves, but round the + // midpoint down in case there is an odd number of groups + // remaining. 
This ensures that: + // - The tail is at least 1 group long. + // - The split is roughly even considering we still have the + // current group to process. + let mid = (len / 2) & !(Group::WIDTH - 1); + + let tail = Self::new( + self.next_ctrl.add(mid), + self.data.next_n(Group::WIDTH).next_n(mid), + len - mid, + ); + debug_assert_eq!( + self.data.next_n(Group::WIDTH).next_n(mid).ptr, + tail.data.ptr + ); + debug_assert_eq!(self.end, tail.end); + self.end = self.next_ctrl.add(mid); + debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl); + (self, Some(tail)) + } + } + } + + /// # Safety + /// If `DO_CHECK_PTR_RANGE` is false, caller must ensure that we never try to iterate + /// after yielding all elements. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn next_impl(&mut self) -> Option> { + loop { + if let Some(index) = self.current_group.next() { + return Some(self.data.next_n(index)); + } + + if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end { + return None; + } + + // We might read past self.end up to the next group boundary, + // but this is fine because it only occurs on tables smaller + // than the group size where the trailing control bytes are all + // EMPTY. On larger tables self.end is guaranteed to be aligned + // to the group size (since tables are power-of-two sized). + self.current_group = Group::load_aligned(self.next_ctrl.cast()) + .match_full() + .into_iter(); + self.data = self.data.next_n(Group::WIDTH); + self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + } + } + + /// Folds every element into an accumulator by applying an operation, + /// returning the final result. + /// + /// `fold_impl()` takes three arguments: the number of items remaining in + /// the iterator, an initial value, and a closure with two arguments: an + /// 'accumulator', and an element. The closure returns the value that the + /// accumulator should have for the next iteration. + /// + /// The initial value is the value the accumulator will have on the first call. + /// + /// After applying this closure to every element of the iterator, `fold_impl()` + /// returns the accumulator. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`Undefined Behavior`]: + /// + /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved, + /// i.e. table outlives the `RawIterRange`; + /// + /// * The provided `n` value must match the actual number of items + /// in the table. + /// + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[allow(clippy::while_let_on_iterator)] + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn fold_impl(mut self, mut n: usize, mut acc: B, mut f: F) -> B + where + F: FnMut(B, Bucket) -> B, + { + loop { + while let Some(index) = self.current_group.next() { + // The returned `index` will always be in the range `0..Group::WIDTH`, + // so that calling `self.data.next_n(index)` is safe (see detailed explanation below). + debug_assert!(n != 0); + let bucket = self.data.next_n(index); + acc = f(acc, bucket); + n -= 1; + } + + if n == 0 { + return acc; + } + + // SAFETY: The caller of this function ensures that: + // + // 1. The provided `n` value matches the actual number of items in the table; + // 2. The table is alive and did not moved. + // + // Taking the above into account, we always stay within the bounds, because: + // + // 1. 
For tables smaller than the group width (self.buckets() <= Group::WIDTH), + // we will never end up in the given branch, since we should have already + // yielded all the elements of the table. + // + // 2. For tables larger than the group width. The number of buckets is a + // power of two (2 ^ n), Group::WIDTH is also power of two (2 ^ k). Since + // `(2 ^ n) > (2 ^ k)`, than `(2 ^ n) % (2 ^ k) = 0`. As we start from the + // start of the array of control bytes, and never try to iterate after + // getting all the elements, the last `self.current_group` will read bytes + // from the `self.buckets() - Group::WIDTH` index. We know also that + // `self.current_group.next()` will always return indices within the range + // `0..Group::WIDTH`. + // + // Knowing all of the above and taking into account that we are synchronizing + // the `self.data` index with the index we used to read the `self.current_group`, + // the subsequent `self.data.next_n(index)` will always return a bucket with + // an index number less than `self.buckets()`. + // + // The last `self.next_ctrl`, whose index would be `self.buckets()`, will never + // actually be read, since we should have already yielded all the elements of + // the table. + self.current_group = Group::load_aligned(self.next_ctrl.cast()) + .match_full() + .into_iter(); + self.data = self.data.next_n(Group::WIDTH); + self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + } + } +} + +// We make raw iterators unconditionally Send and Sync, and let the PhantomData +// in the actual iterator implementations determine the real Send/Sync bounds. +unsafe impl Send for RawIterRange {} +unsafe impl Sync for RawIterRange {} + +impl Clone for RawIterRange { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + next_ctrl: self.next_ctrl, + current_group: self.current_group, + end: self.end, + } + } +} + +impl Iterator for RawIterRange { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option> { + unsafe { + // SAFETY: We set checker flag to true. + self.next_impl::() + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + // We don't have an item count, so just guess based on the range size. + let remaining_buckets = if self.end > self.next_ctrl { + unsafe { offset_from(self.end, self.next_ctrl) } + } else { + 0 + }; + + // Add a group width to include the group we are currently processing. + (0, Some(Group::WIDTH + remaining_buckets)) + } +} + +impl FusedIterator for RawIterRange {} + +/// Iterator which returns a raw pointer to every full bucket in the table. +/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding that bucket (unless `reflect_remove` is called). +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator (unless `reflect_insert` is called). +/// - The order in which the iterator yields bucket is unspecified and may +/// change in the future. 
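// Standalone sketch of how a matched group becomes bucket indices in `next_impl`
// above: a bitmask with one bit per full slot is drained lowest bit first. A u16
// mask with a one-bit stride per tag is assumed here, as in the SSE2 backend; the
// NEON backend uses a wider stride but the idea is the same.
struct FullSlots(u16);

impl Iterator for FullSlots {
    type Item = usize;
    fn next(&mut self) -> Option<usize> {
        if self.0 == 0 {
            return None;
        }
        let bit = self.0.trailing_zeros() as usize;
        self.0 &= self.0 - 1; // clear the lowest set bit
        Some(bit)
    }
}

fn main() {
    // Slots 0, 3 and 5 of the group are full.
    let indices: Vec<usize> = FullSlots(0b10_1001).collect();
    assert_eq!(indices, [0, 3, 5]);
}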
+pub struct RawIter { + pub(crate) iter: RawIterRange, + items: usize, +} + +impl RawIter { + unsafe fn drop_elements(&mut self) { + if T::NEEDS_DROP && self.items != 0 { + for item in self { + item.drop(); + } + } + } +} + +impl Clone for RawIter { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + iter: self.iter.clone(), + items: self.items, + } + } +} +impl Default for RawIter { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + // SAFETY: Because the table is static, it always outlives the iter. + unsafe { RawTableInner::NEW.iter() } + } +} + +impl Iterator for RawIter { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option> { + // Inner iterator iterates over buckets + // so it can do unnecessary work if we already yielded all items. + if self.items == 0 { + return None; + } + + let nxt = unsafe { + // SAFETY: We check number of items to yield using `items` field. + self.iter.next_impl::() + }; + + debug_assert!(nxt.is_some()); + self.items -= 1; + + nxt + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (self.items, Some(self.items)) + } + + #[inline] + fn fold(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + unsafe { self.iter.fold_impl(self.items, init, f) } + } +} + +impl ExactSizeIterator for RawIter {} +impl FusedIterator for RawIter {} + +/// Iterator which returns an index of every full bucket in the table. +/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding index of that bucket. +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator. +/// - The order in which the iterator yields indices of the buckets is unspecified +/// and may change in the future. +pub(crate) struct FullBucketsIndices { + // Mask of full buckets in the current group. Bits are cleared from this + // mask as each element is processed. + current_group: BitMaskIter, + + // Initial value of the bytes' indices of the current group (relative + // to the start of the control bytes). + group_first_index: usize, + + // Pointer to the current group of control bytes, + // Must be aligned to the group size (Group::WIDTH). + ctrl: NonNull, + + // Number of elements in the table. + items: usize, +} + +impl FullBucketsIndices { + /// Advances the iterator and returns the next value. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`Undefined Behavior`]: + /// + /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved, + /// i.e. table outlives the `FullBucketsIndices`; + /// + /// * It never tries to iterate after getting all elements. + /// + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline(always)] + unsafe fn next_impl(&mut self) -> Option { + loop { + if let Some(index) = self.current_group.next() { + // The returned `self.group_first_index + index` will always + // be in the range `0..self.buckets()`. See explanation below. 
+ return Some(self.group_first_index + index); + } + + // SAFETY: The caller of this function ensures that: + // + // 1. It never tries to iterate after getting all the elements; + // 2. The table is alive and did not moved; + // 3. The first `self.ctrl` pointed to the start of the array of control bytes. + // + // Taking the above into account, we always stay within the bounds, because: + // + // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH), + // we will never end up in the given branch, since we should have already + // yielded all the elements of the table. + // + // 2. For tables larger than the group width. The number of buckets is a + // power of two (2 ^ n), Group::WIDTH is also power of two (2 ^ k). Since + // `(2 ^ n) > (2 ^ k)`, than `(2 ^ n) % (2 ^ k) = 0`. As we start from the + // the start of the array of control bytes, and never try to iterate after + // getting all the elements, the last `self.ctrl` will be equal to + // the `self.buckets() - Group::WIDTH`, so `self.current_group.next()` + // will always contains indices within the range `0..Group::WIDTH`, + // and subsequent `self.group_first_index + index` will always return a + // number less than `self.buckets()`. + self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH)); + + // SAFETY: See explanation above. + self.current_group = Group::load_aligned(self.ctrl.as_ptr().cast()) + .match_full() + .into_iter(); + self.group_first_index += Group::WIDTH; + } + } +} + +impl Iterator for FullBucketsIndices { + type Item = usize; + + /// Advances the iterator and returns the next value. It is up to + /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`, + /// because we cannot make the `next` method unsafe. + #[inline(always)] + fn next(&mut self) -> Option { + // Return if we already yielded all items. + if self.items == 0 { + return None; + } + + let nxt = unsafe { + // SAFETY: + // 1. We check number of items to yield using `items` field. + // 2. The caller ensures that the table is alive and has not moved. + self.next_impl() + }; + + debug_assert!(nxt.is_some()); + self.items -= 1; + + nxt + } + + #[inline(always)] + fn size_hint(&self) -> (usize, Option) { + (self.items, Some(self.items)) + } +} + +impl ExactSizeIterator for FullBucketsIndices {} +impl FusedIterator for FullBucketsIndices {} + +/// Iterator which consumes a table and returns elements. 
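// Small arithmetic check (illustrative only) of the divisibility fact the SAFETY
// comments of `FullBucketsIndices::next_impl` and `fold_impl` rely on: with a
// power-of-two bucket count of at least Group::WIDTH, stepping the control pointer
// by whole groups lands exactly on `buckets - Group::WIDTH` for the last aligned
// group and never strays past it.
fn main() {
    let group_width = 8usize; // stand-in for Group::WIDTH
    for n in 3..12u32 {
        let buckets = 1usize << n; // 2^n >= group_width
        assert_eq!(buckets % group_width, 0);
        let last_group_start = (buckets / group_width - 1) * group_width;
        assert_eq!(last_group_start, buckets - group_width);
    }
}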
+pub struct RawIntoIter { + iter: RawIter, + allocation: Option<(NonNull, Layout, A)>, + marker: PhantomData, +} + +impl RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> RawIter { + self.iter.clone() + } +} + +unsafe impl Send for RawIntoIter +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawIntoIter +where + T: Sync, + A: Sync, +{ +} + +#[cfg(feature = "nightly")] +unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements + self.iter.drop_elements(); + + // Free the table + if let Some((ptr, layout, ref alloc)) = self.allocation { + alloc.deallocate(ptr, layout); + } + } + } +} +#[cfg(not(feature = "nightly"))] +impl Drop for RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements + self.iter.drop_elements(); + + // Free the table + if let Some((ptr, layout, ref alloc)) = self.allocation { + alloc.deallocate(ptr, layout); + } + } + } +} + +impl Default for RawIntoIter { + fn default() -> Self { + Self { + iter: Default::default(), + allocation: None, + marker: PhantomData, + } + } +} +impl Iterator for RawIntoIter { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + unsafe { Some(self.iter.next()?.read()) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for RawIntoIter {} +impl FusedIterator for RawIntoIter {} + +/// Iterator which consumes elements without freeing the table storage. +pub struct RawDrain<'a, T, A: Allocator = Global> { + iter: RawIter, + + // The table is moved into the iterator for the duration of the drain. This + // ensures that an empty table is left if the drain iterator is leaked + // without dropping. + table: RawTableInner, + orig_table: NonNull, + + // We don't use a &'a mut RawTable because we want RawDrain to be + // covariant over T. + marker: PhantomData<&'a RawTable>, +} + +impl RawDrain<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> RawIter { + self.iter.clone() + } +} + +unsafe impl Send for RawDrain<'_, T, A> +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawDrain<'_, T, A> +where + T: Sync, + A: Sync, +{ +} + +impl Drop for RawDrain<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements. Note that this may panic. + self.iter.drop_elements(); + + // Reset the contents of the table now that all elements have been + // dropped. + self.table.clear_no_drop(); + + // Move the now empty table back to its original location. + self.orig_table + .as_ptr() + .copy_from_nonoverlapping(&self.table, 1); + } + } +} + +impl Iterator for RawDrain<'_, T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + unsafe { + let item = self.iter.next()?; + Some(item.read()) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for RawDrain<'_, T, A> {} +impl FusedIterator for RawDrain<'_, T, A> {} + +/// Iterator over occupied buckets that could match a given hash. +/// +/// `RawTable` only stores 7 bits of the hash value, so this iterator may return +/// items that have a hash value different than the one provided. You should +/// always validate the returned values before using them. 
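// Hedged sketch (constants shown for a 64-bit hash; not hashbrown's exact helpers)
// of the split this warning is about: only the top 7 bits of the hash survive as
// the control-byte tag, while the low bits choose the group where probing starts,
// so tag matches must still be confirmed against the key.
fn h1(hash: u64, bucket_mask: usize) -> usize {
    (hash as usize) & bucket_mask
}

fn h2(hash: u64) -> u8 {
    // Top 7 bits; the high bit stays clear so a full tag can never collide with
    // the EMPTY (0xFF) or DELETED (0x80) sentinels.
    ((hash >> (64 - 7)) & 0x7f) as u8
}

fn main() {
    let hash = 0xDEAD_BEEF_DEAD_BEEFu64;
    assert!(h2(hash) < 0x80);
    assert!(h1(hash, 0b1111) < 16);
}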
+/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding that bucket. +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator. +/// - The order in which the iterator yields buckets is unspecified and may +/// change in the future. +pub struct RawIterHash { + inner: RawIterHashInner, + _marker: PhantomData, +} + +#[derive(Clone)] +struct RawIterHashInner { + // See `RawTableInner`'s corresponding fields for details. + // We can't store a `*const RawTableInner` as it would get + // invalidated by the user calling `&mut` methods on `RawTable`. + bucket_mask: usize, + ctrl: NonNull, + + // The top 7 bits of the hash. + tag_hash: Tag, + + // The sequence of groups to probe in the search. + probe_seq: ProbeSeq, + + group: Group, + + // The elements within the group with a matching tag-hash. + bitmask: BitMaskIter, +} + +impl RawIterHash { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new(table: &RawTable, hash: u64) -> Self { + RawIterHash { + inner: RawIterHashInner::new(&table.table, hash), + _marker: PhantomData, + } + } +} + +impl Clone for RawIterHash { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + _marker: PhantomData, + } + } +} + +impl Default for RawIterHash { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + // SAFETY: Because the table is static, it always outlives the iter. + inner: unsafe { RawIterHashInner::new(&RawTableInner::NEW, 0) }, + _marker: PhantomData, + } + } +} + +impl RawIterHashInner { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new(table: &RawTableInner, hash: u64) -> Self { + let tag_hash = Tag::full(hash); + let probe_seq = table.probe_seq(hash); + let group = Group::load(table.ctrl(probe_seq.pos)); + let bitmask = group.match_tag(tag_hash).into_iter(); + + RawIterHashInner { + bucket_mask: table.bucket_mask, + ctrl: table.ctrl, + tag_hash, + probe_seq, + group, + bitmask, + } + } +} + +impl Iterator for RawIterHash { + type Item = Bucket; + + fn next(&mut self) -> Option> { + unsafe { + match self.inner.next() { + Some(index) => { + // Can't use `RawTable::bucket` here as we don't have + // an actual `RawTable` reference to use. + debug_assert!(index <= self.inner.bucket_mask); + let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index); + Some(bucket) + } + None => None, + } + } + } +} + +impl Iterator for RawIterHashInner { + type Item = usize; + + fn next(&mut self) -> Option { + unsafe { + loop { + if let Some(bit) = self.bitmask.next() { + let index = (self.probe_seq.pos + bit) & self.bucket_mask; + return Some(index); + } + if likely(self.group.match_empty().any_bit_set()) { + return None; + } + self.probe_seq.move_next(self.bucket_mask); + + // Can't use `RawTableInner::ctrl` here as we don't have + // an actual `RawTableInner` reference to use. 
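// Hedged sketch of the probe sequence that drives this iterator (the real
// `ProbeSeq`/`move_next` live outside this hunk; the shape below mirrors the usual
// triangular probing): starting from the group chosen by the hash, the stride grows
// by one group each step, and with a power-of-two group count every group is
// visited exactly once before the sequence repeats.
use std::collections::HashSet;

const GROUP_WIDTH: usize = 8;

struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    fn move_next(&mut self, bucket_mask: usize) {
        self.stride += GROUP_WIDTH;
        self.pos += self.stride;
        self.pos &= bucket_mask;
    }
}

fn main() {
    let bucket_mask = 63; // 64 buckets, i.e. 8 groups of 8
    let mut seq = ProbeSeq { pos: 0, stride: 0 };
    let mut visited = HashSet::new();
    for _ in 0..8 {
        visited.insert(seq.pos / GROUP_WIDTH);
        seq.move_next(bucket_mask);
    }
    assert_eq!(visited.len(), 8); // every group visited exactly once
}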
+ let index = self.probe_seq.pos; + debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH); + let group_ctrl = self.ctrl.as_ptr().add(index).cast(); + + self.group = Group::load(group_ctrl); + self.bitmask = self.group.match_tag(self.tag_hash).into_iter(); + } + } + } +} + +pub(crate) struct RawExtractIf<'a, T, A: Allocator> { + pub iter: RawIter, + pub table: &'a mut RawTable, +} + +impl RawExtractIf<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) fn next(&mut self, mut f: F) -> Option + where + F: FnMut(&mut T) -> bool, + { + unsafe { + for item in &mut self.iter { + if f(item.as_mut()) { + return Some(self.table.remove(item).0); + } + } + } + None + } +} + +#[cfg(test)] +mod test_map { + use super::*; + + fn rehash_in_place(table: &mut RawTable, hasher: impl Fn(&T) -> u64) { + unsafe { + table.table.rehash_in_place( + &|table, index| hasher(table.bucket::(index).as_ref()), + mem::size_of::(), + if mem::needs_drop::() { + Some(|ptr| ptr::drop_in_place(ptr as *mut T)) + } else { + None + }, + ); + } + } + + #[test] + fn rehash() { + let mut table = RawTable::new(); + let hasher = |i: &u64| *i; + for i in 0..100 { + table.insert(i, i, hasher); + } + + for i in 0..100 { + unsafe { + assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); + } + assert!(table.find(i + 100, |x| *x == i + 100).is_none()); + } + + rehash_in_place(&mut table, hasher); + + for i in 0..100 { + unsafe { + assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); + } + assert!(table.find(i + 100, |x| *x == i + 100).is_none()); + } + } + + /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF + /// AN UNINITIALIZED TABLE DURING THE DROP + #[test] + fn test_drop_uninitialized() { + use ::alloc::vec::Vec; + + let table = unsafe { + // SAFETY: The `buckets` is power of two and we're not + // trying to actually use the returned RawTable. + RawTable::<(u64, Vec)>::new_uninitialized(Global, 8, Fallibility::Infallible) + .unwrap() + }; + drop(table); + } + + /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` + /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. + #[test] + fn test_drop_zero_items() { + use ::alloc::vec::Vec; + unsafe { + // SAFETY: The `buckets` is power of two and we're not + // trying to actually use the returned RawTable. + let table = + RawTable::<(u64, Vec)>::new_uninitialized(Global, 8, Fallibility::Infallible) + .unwrap(); + + // WE SIMULATE, AS IT WERE, A FULL TABLE. + + // SAFETY: We checked that the table is allocated and therefore the table already has + // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) + // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. + table + .table + .ctrl(0) + .write_bytes(Tag::EMPTY.0, table.table.num_ctrl_bytes()); + + // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets() + table.table.ctrl(0).write_bytes(0, table.capacity()); + + // Fix up the trailing control bytes. See the comments in set_ctrl + // for the handling of tables smaller than the group width. 
+ if table.buckets() < Group::WIDTH { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, + // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to + // `Group::WIDTH` is safe + table + .table + .ctrl(0) + .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets()); + } else { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of + // control bytes,so copying `Group::WIDTH` bytes with offset equal + // to `self.buckets() == self.bucket_mask + 1` is safe + table + .table + .ctrl(0) + .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH); + } + drop(table); + } + } + + /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` + /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. + #[test] + fn test_catch_panic_clone_from() { + use ::alloc::sync::Arc; + use ::alloc::vec::Vec; + use allocator_api2::alloc::{AllocError, Allocator, Global}; + use core::sync::atomic::{AtomicI8, Ordering}; + use std::thread; + + struct MyAllocInner { + drop_count: Arc, + } + + #[derive(Clone)] + struct MyAlloc { + _inner: Arc, + } + + impl Drop for MyAllocInner { + fn drop(&mut self) { + println!("MyAlloc freed."); + self.drop_count.fetch_sub(1, Ordering::SeqCst); + } + } + + unsafe impl Allocator for MyAlloc { + fn allocate(&self, layout: Layout) -> std::result::Result, AllocError> { + let g = Global; + g.allocate(layout) + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + let g = Global; + g.deallocate(ptr, layout) + } + } + + const DISARMED: bool = false; + const ARMED: bool = true; + + struct CheckedCloneDrop { + panic_in_clone: bool, + dropped: bool, + need_drop: Vec, + } + + impl Clone for CheckedCloneDrop { + fn clone(&self) -> Self { + if self.panic_in_clone { + panic!("panic in clone") + } + Self { + panic_in_clone: self.panic_in_clone, + dropped: self.dropped, + need_drop: self.need_drop.clone(), + } + } + } + + impl Drop for CheckedCloneDrop { + fn drop(&mut self) { + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + let mut table = RawTable::new_in(MyAlloc { + _inner: Arc::new(MyAllocInner { + drop_count: dropped.clone(), + }), + }); + + for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() { + let idx = idx as u64; + table.insert( + idx, + ( + idx, + CheckedCloneDrop { + panic_in_clone, + dropped: false, + need_drop: vec![idx], + }, + ), + |(k, _)| *k, + ); + } + + assert_eq!(table.len(), 7); + + thread::scope(|s| { + let result = s.spawn(|| { + let armed_flags = [ + DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + let mut scope_table = RawTable::new_in(MyAlloc { + _inner: Arc::new(MyAllocInner { + drop_count: dropped.clone(), + }), + }); + for (idx, &panic_in_clone) in armed_flags.iter().enumerate() { + let idx = idx as u64; + scope_table.insert( + idx, + ( + idx, + CheckedCloneDrop { + panic_in_clone, + dropped: false, + need_drop: vec![idx + 100], + }, + ), + |(k, _)| *k, + ); + } + table.clone_from(&scope_table); + }); + assert!(result.join().is_err()); + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. 
+ assert_eq!(table.len(), 0); + assert_eq!(unsafe { table.iter().count() }, 0); + assert_eq!(unsafe { table.iter().iter.count() }, 0); + + for idx in 0..table.buckets() { + let idx = idx as u64; + assert!( + table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 1); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/neon.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/neon.rs new file mode 100644 index 000000000000..b79f139e8eda --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/neon.rs @@ -0,0 +1,124 @@ +use super::bitmask::BitMask; +use super::Tag; +use core::arch::aarch64 as neon; +use core::mem; +use core::num::NonZeroU64; + +pub(crate) type BitMaskWord = u64; +pub(crate) type NonZeroBitMaskWord = NonZeroU64; +pub(crate) const BITMASK_STRIDE: usize = 8; +pub(crate) const BITMASK_MASK: BitMaskWord = !0; +pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080; + +/// Abstraction over a group of control tags which can be scanned in +/// parallel. +/// +/// This implementation uses a 64-bit NEON value. +#[derive(Copy, Clone)] +pub(crate) struct Group(neon::uint8x8_t); + +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub(crate) const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty tags, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + #[inline] + pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] { + #[repr(C)] + struct AlignedTags { + _align: [Group; 0], + tags: [Tag; Group::WIDTH], + } + const ALIGNED_TAGS: AlignedTags = AlignedTags { + _align: [], + tags: [Tag::EMPTY; Group::WIDTH], + }; + &ALIGNED_TAGS.tags + } + + /// Loads a group of tags starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub(crate) unsafe fn load(ptr: *const Tag) -> Self { + Group(neon::vld1_u8(ptr.cast())) + } + + /// Loads a group of tags starting at the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(neon::vld1_u8(ptr.cast())) + } + + /// Stores the group of tags to the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + neon::vst1_u8(ptr.cast(), self.0); + } + + /// Returns a `BitMask` indicating all tags in the group which *may* + /// have the given value. + #[inline] + pub(crate) fn match_tag(self, tag: Tag) -> BitMask { + unsafe { + let cmp = neon::vceq_u8(self.0, neon::vdup_n_u8(tag.0)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Returns a `BitMask` indicating all tags in the group which are + /// `EMPTY`. + #[inline] + pub(crate) fn match_empty(self) -> BitMask { + self.match_tag(Tag::EMPTY) + } + + /// Returns a `BitMask` indicating all tags in the group which are + /// `EMPTY` or `DELETED`. 
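// Scalar restatement (illustrative, mirroring hashbrown's tag encoding) of why the
// sign tests below work: EMPTY is 0b1111_1111, DELETED is 0b1000_0000, and full tags
// are 7-bit hash fragments with the high bit clear, so "empty or deleted" is simply
// "high bit set".
const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

fn is_full(tag: u8) -> bool {
    tag & 0x80 == 0
}

fn is_special(tag: u8) -> bool {
    tag & 0x80 != 0 // EMPTY or DELETED
}

fn special_is_empty(tag: u8) -> bool {
    tag & 0x01 != 0 // distinguishes EMPTY from DELETED among special tags
}

fn main() {
    assert!(is_special(EMPTY) && special_is_empty(EMPTY));
    assert!(is_special(DELETED) && !special_is_empty(DELETED));
    assert!(is_full(0x5a)); // an ordinary 7-bit tag
}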
+ #[inline] + pub(crate) fn match_empty_or_deleted(self) -> BitMask { + unsafe { + let cmp = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Returns a `BitMask` indicating all tags in the group which are full. + #[inline] + pub(crate) fn match_full(self) -> BitMask { + unsafe { + let cmp = neon::vcgez_s8(neon::vreinterpret_s8_u8(self.0)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Performs the following transformation on all tags in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let special = 0 > tag = 1111_1111 (true) or 0000_0000 (false) + // 1111_1111 | 1000_0000 = 1111_1111 + // 0000_0000 | 1000_0000 = 1000_0000 + unsafe { + let special = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0)); + Group(neon::vorr_u8(special, neon::vdup_n_u8(0x80))) + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/sse2.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/sse2.rs new file mode 100644 index 000000000000..87af2727be67 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw/sse2.rs @@ -0,0 +1,149 @@ +use super::bitmask::BitMask; +use super::Tag; +use core::mem; +use core::num::NonZeroU16; + +#[cfg(target_arch = "x86")] +use core::arch::x86; +#[cfg(target_arch = "x86_64")] +use core::arch::x86_64 as x86; + +pub(crate) type BitMaskWord = u16; +pub(crate) type NonZeroBitMaskWord = NonZeroU16; +pub(crate) const BITMASK_STRIDE: usize = 1; +pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff; +pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; + +/// Abstraction over a group of control tags which can be scanned in +/// parallel. +/// +/// This implementation uses a 128-bit SSE value. +#[derive(Copy, Clone)] +pub(crate) struct Group(x86::__m128i); + +// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub(crate) const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty tags, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + #[inline] + #[allow(clippy::items_after_statements)] + pub(crate) const fn static_empty() -> &'static [Tag; Group::WIDTH] { + #[repr(C)] + struct AlignedTags { + _align: [Group; 0], + tags: [Tag; Group::WIDTH], + } + const ALIGNED_TAGS: AlignedTags = AlignedTags { + _align: [], + tags: [Tag::EMPTY; Group::WIDTH], + }; + &ALIGNED_TAGS.tags + } + + /// Loads a group of tags starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub(crate) unsafe fn load(ptr: *const Tag) -> Self { + Group(x86::_mm_loadu_si128(ptr.cast())) + } + + /// Loads a group of tags starting at the given address, which must be + /// aligned to `mem::align_of::()`. 
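The `convert_special_to_empty_and_full_to_deleted` transformation above (shared by the NEON group and the SSE2 group that follows) is a per-byte mapping. A scalar sketch of it, assuming hashbrown's usual tag encoding (EMPTY = 0xFF, DELETED = 0x80, FULL = any byte with the high bit clear):

fn convert_special(tag: u8) -> u8 {
    // `special` is the "compare less than zero as i8" lane: all ones when
    // the high bit is set (EMPTY or DELETED), all zeros for FULL.
    let special: u8 = if (tag as i8) < 0 { 0xFF } else { 0x00 };
    // OR-ing with 0x80 (the DELETED tag) reproduces the vector code.
    special | 0x80
}

fn main() {
    const EMPTY: u8 = 0xFF;
    const DELETED: u8 = 0x80;
    assert_eq!(convert_special(EMPTY), EMPTY);   // EMPTY   => EMPTY
    assert_eq!(convert_special(DELETED), EMPTY); // DELETED => EMPTY
    assert_eq!(convert_special(0x23), DELETED);  // FULL    => DELETED
}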
+ #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn load_aligned(ptr: *const Tag) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(x86::_mm_load_si128(ptr.cast())) + } + + /// Stores the group of tags to the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn store_aligned(self, ptr: *mut Tag) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + x86::_mm_store_si128(ptr.cast(), self.0); + } + + /// Returns a `BitMask` indicating all tags in the group which have + /// the given value. + #[inline] + pub(crate) fn match_tag(self, tag: Tag) -> BitMask { + #[allow( + clippy::cast_possible_wrap, // tag.0: Tag as i8 + // tag: i32 as u16 + // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the + // upper 16-bits of the i32 are zeroed: + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] + unsafe { + let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(tag.0 as i8)); + BitMask(x86::_mm_movemask_epi8(cmp) as u16) + } + } + + /// Returns a `BitMask` indicating all tags in the group which are + /// `EMPTY`. + #[inline] + pub(crate) fn match_empty(self) -> BitMask { + self.match_tag(Tag::EMPTY) + } + + /// Returns a `BitMask` indicating all tags in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub(crate) fn match_empty_or_deleted(self) -> BitMask { + #[allow( + // tag: i32 as u16 + // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the + // upper 16-bits of the i32 are zeroed: + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] + unsafe { + // A tag is EMPTY or DELETED iff the high bit is set + BitMask(x86::_mm_movemask_epi8(self.0) as u16) + } + } + + /// Returns a `BitMask` indicating all tags in the group which are full. + #[inline] + pub(crate) fn match_full(&self) -> BitMask { + self.match_empty_or_deleted().invert() + } + + /// Performs the following transformation on all tags in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let special = 0 > tag = 1111_1111 (true) or 0000_0000 (false) + // 1111_1111 | 1000_0000 = 1111_1111 + // 0000_0000 | 1000_0000 = 1000_0000 + #[allow( + clippy::cast_possible_wrap, // tag: Tag::DELETED.0 as i8 + )] + unsafe { + let zero = x86::_mm_setzero_si128(); + let special = x86::_mm_cmpgt_epi8(zero, self.0); + Group(x86::_mm_or_si128( + special, + x86::_mm_set1_epi8(Tag::DELETED.0 as i8), + )) + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw_entry.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw_entry.rs new file mode 100644 index 000000000000..480ebdbe1fb9 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/raw_entry.rs @@ -0,0 +1,1740 @@ +use crate::hash_map::{equivalent, make_hash, make_hasher}; +use crate::raw::{Allocator, Bucket, Global, RawTable}; +use crate::{Equivalent, HashMap}; +use core::fmt::{self, Debug}; +use core::hash::{BuildHasher, Hash}; +use core::mem; + +impl HashMap { + /// Creates a raw entry builder for the `HashMap`. 
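The SSE2 path above compares all 16 control bytes at once with `_mm_cmpeq_epi8` and collapses the high bit of each comparison lane into a 16-bit mask via `_mm_movemask_epi8`, so bit i is set exactly when byte i matches and `BITMASK_STRIDE` is 1. A portable scalar sketch of that mask:

fn match_tag_scalar(group: [u8; 16], tag: u8) -> u16 {
    let mut mask = 0u16;
    for (i, &b) in group.iter().enumerate() {
        if b == tag {
            mask |= 1 << i;
        }
    }
    mask
}

fn main() {
    let mut group = [0xFFu8; 16]; // all EMPTY
    group[3] = 0x42;
    group[9] = 0x42;
    let m = match_tag_scalar(group, 0x42);
    assert_eq!(m, (1 << 3) | (1 << 9));
    // With a stride of 1, trailing_zeros() is the index of the first
    // candidate slot.
    assert_eq!(m.trailing_zeros(), 3);
}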
+ /// + /// Raw entries provide the lowest level of control for searching and + /// manipulating a map. They must be manually initialized with a hash and + /// then manually searched. After this, insertions into a vacant entry + /// still require an owned key to be provided. + /// + /// Raw entries are useful for such exotic situations as: + /// + /// * Hash memoization + /// * Deferring the creation of an owned key until it is known to be required + /// * Using a search key that doesn't work with the Borrow trait + /// * Using custom comparison logic without newtype wrappers + /// + /// Because raw entries provide much more low-level control, it's much easier + /// to put the `HashMap` into an inconsistent state which, while memory-safe, + /// will cause the map to produce seemingly random results. Higher-level and + /// more foolproof APIs like `entry` should be preferred when possible. + /// + /// In particular, the hash used to initialized the raw entry must still be + /// consistent with the hash of the key that is ultimately stored in the entry. + /// This is because implementations of `HashMap` may need to recompute hashes + /// when resizing, at which point only the keys are available. + /// + /// Raw entries give mutable access to the keys. This must not be used + /// to modify how the key would compare or hash, as the map will not re-evaluate + /// where the key should go, meaning the keys may become "lost" if their + /// location does not reflect their state. For instance, if you change a key + /// so that the map now contains keys which compare equal, search may start + /// acting erratically, with two keys randomly masking each other. Implementations + /// are free to assume this doesn't happen (within the limits of memory-safety). + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map = HashMap::new(); + /// map.extend([("a", 100), ("b", 200), ("c", 300)]); + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// // Existing key (insert and update) + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(mut view) => { + /// assert_eq!(view.get(), &100); + /// let v = view.get_mut(); + /// let new_v = (*v) * 10; + /// *v = new_v; + /// assert_eq!(view.insert(1111), 1000); + /// } + /// } + /// + /// assert_eq!(map[&"a"], 1111); + /// assert_eq!(map.len(), 3); + /// + /// // Existing key (take) + /// let hash = compute_hash(map.hasher(), &"c"); + /// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"c") { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(view) => { + /// assert_eq!(view.remove_entry(), ("c", 300)); + /// } + /// } + /// assert_eq!(map.raw_entry().from_key(&"c"), None); + /// assert_eq!(map.len(), 2); + /// + /// // Nonexistent key (insert and update) + /// let key = "d"; + /// let hash = compute_hash(map.hasher(), &key); + /// match map.raw_entry_mut().from_hash(hash, |q| *q == key) { + /// RawEntryMut::Occupied(_) => unreachable!(), + /// RawEntryMut::Vacant(view) => { + /// let (k, value) = view.insert("d", 4000); + /// assert_eq!((*k, *value), ("d", 4000)); + /// *value = 40000; + /// } + /// } + /// assert_eq!(map[&"d"], 40000); + /// assert_eq!(map.len(), 3); + /// + /// match 
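The doc examples above lean on a small `compute_hash` helper whose generic parameters were lost in this rendering of the patch; written out with the bounds it needs (a sketch of the helper used in the examples, shown with std's RandomState so it runs stand-alone):

use core::hash::{BuildHasher, Hash, Hasher};
use std::collections::hash_map::RandomState;

fn compute_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    let mut state = hash_builder.build_hasher();
    key.hash(&mut state);
    state.finish()
}

fn main() {
    let hash_builder = RandomState::new();
    // The same builder and key always agree, which is the consistency
    // property the raw-entry docs above insist on.
    assert_eq!(
        compute_hash(&hash_builder, "a"),
        compute_hash(&hash_builder, "a")
    );
    println!("hash of \"a\": {:#x}", compute_hash(&hash_builder, "a"));
}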
map.raw_entry_mut().from_hash(hash, |q| *q == key) { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(view) => { + /// assert_eq!(view.remove_entry(), ("d", 40000)); + /// } + /// } + /// assert_eq!(map.get(&"d"), None); + /// assert_eq!(map.len(), 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, S, A> { + RawEntryBuilderMut { map: self } + } + + /// Creates a raw immutable entry builder for the `HashMap`. + /// + /// Raw entries provide the lowest level of control for searching and + /// manipulating a map. They must be manually initialized with a hash and + /// then manually searched. + /// + /// This is useful for + /// * Hash memoization + /// * Using a search key that doesn't work with the Borrow trait + /// * Using custom comparison logic without newtype wrappers + /// + /// Unless you are in such a situation, higher-level and more foolproof APIs like + /// `get` should be preferred. + /// + /// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.extend([("a", 100), ("b", 200), ("c", 300)]); + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// for k in ["a", "b", "c", "d", "e", "f"] { + /// let hash = compute_hash(map.hasher(), k); + /// let v = map.get(&k).cloned(); + /// let kv = v.as_ref().map(|v| (&k, v)); + /// + /// println!("Key: {} and value: {:?}", k, v); + /// + /// assert_eq!(map.raw_entry().from_key(&k), kv); + /// assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); + /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, S, A> { + RawEntryBuilder { map: self } + } +} + +/// A builder for computing where in a [`HashMap`] a key-value pair would be stored. +/// +/// See the [`HashMap::raw_entry_mut`] docs for usage examples. 
+/// +/// [`HashMap::raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{RawEntryBuilderMut, RawEntryMut::Vacant, RawEntryMut::Occupied}; +/// use hashbrown::HashMap; +/// use core::hash::{BuildHasher, Hash}; +/// +/// let mut map = HashMap::new(); +/// map.extend([(1, 11), (2, 12), (3, 13), (4, 14), (5, 15), (6, 16)]); +/// assert_eq!(map.len(), 6); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// let builder: RawEntryBuilderMut<_, _, _> = map.raw_entry_mut(); +/// +/// // Existing key +/// match builder.from_key(&6) { +/// Vacant(_) => unreachable!(), +/// Occupied(view) => assert_eq!(view.get(), &16), +/// } +/// +/// for key in 0..12 { +/// let hash = compute_hash(map.hasher(), &key); +/// let value = map.get(&key).cloned(); +/// let key_value = value.as_ref().map(|v| (&key, v)); +/// +/// println!("Key: {} and value: {:?}", key, value); +/// +/// match map.raw_entry_mut().from_key(&key) { +/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), +/// Vacant(_) => assert_eq!(value, None), +/// } +/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &key) { +/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), +/// Vacant(_) => assert_eq!(value, None), +/// } +/// match map.raw_entry_mut().from_hash(hash, |q| *q == key) { +/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), +/// Vacant(_) => assert_eq!(value, None), +/// } +/// } +/// +/// assert_eq!(map.len(), 6); +/// ``` +pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator = Global> { + map: &'a mut HashMap, +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This is a lower-level version of [`Entry`]. +/// +/// This `enum` is constructed through the [`raw_entry_mut`] method on [`HashMap`], +/// then calling one of the methods of that [`RawEntryBuilderMut`]. 
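`RawEntryMut` above is described as the lower-level sibling of the familiar entry API: the same Occupied/Vacant split, but with hashing and key equality supplied by the caller. For orientation, the higher-level pattern it generalizes, as a std-only sketch (this is the analogue, not the raw API itself):

use std::collections::HashMap;

fn main() {
    let mut map: HashMap<&str, i32> = HashMap::new();
    map.extend([("a", 1), ("b", 2)]);

    // entry() hides the hashing and key comparison that the raw API exposes.
    *map.entry("a").or_insert(0) += 10;
    map.entry("c").or_insert_with(|| 30);

    assert_eq!(map["a"], 11);
    assert_eq!(map["c"], 30);
}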
+/// +/// [`HashMap`]: struct.HashMap.html +/// [`Entry`]: enum.Entry.html +/// [`raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut +/// [`RawEntryBuilderMut`]: struct.RawEntryBuilderMut.html +/// +/// # Examples +/// +/// ``` +/// use core::hash::{BuildHasher, Hash}; +/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawOccupiedEntryMut}; +/// +/// let mut map = HashMap::new(); +/// map.extend([('a', 1), ('b', 2), ('c', 3)]); +/// assert_eq!(map.len(), 3); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// // Existing key (insert) +/// let raw: RawEntryMut<_, _, _> = map.raw_entry_mut().from_key(&'a'); +/// let _raw_o: RawOccupiedEntryMut<_, _, _> = raw.insert('a', 10); +/// assert_eq!(map.len(), 3); +/// +/// // Nonexistent key (insert) +/// map.raw_entry_mut().from_key(&'d').insert('d', 40); +/// assert_eq!(map.len(), 4); +/// +/// // Existing key (or_insert) +/// let hash = compute_hash(map.hasher(), &'b'); +/// let kv = map +/// .raw_entry_mut() +/// .from_key_hashed_nocheck(hash, &'b') +/// .or_insert('b', 20); +/// assert_eq!(kv, (&mut 'b', &mut 2)); +/// *kv.1 = 20; +/// assert_eq!(map.len(), 4); +/// +/// // Nonexistent key (or_insert) +/// let hash = compute_hash(map.hasher(), &'e'); +/// let kv = map +/// .raw_entry_mut() +/// .from_key_hashed_nocheck(hash, &'e') +/// .or_insert('e', 50); +/// assert_eq!(kv, (&mut 'e', &mut 50)); +/// assert_eq!(map.len(), 5); +/// +/// // Existing key (or_insert_with) +/// let hash = compute_hash(map.hasher(), &'c'); +/// let kv = map +/// .raw_entry_mut() +/// .from_hash(hash, |q| q == &'c') +/// .or_insert_with(|| ('c', 30)); +/// assert_eq!(kv, (&mut 'c', &mut 3)); +/// *kv.1 = 30; +/// assert_eq!(map.len(), 5); +/// +/// // Nonexistent key (or_insert_with) +/// let hash = compute_hash(map.hasher(), &'f'); +/// let kv = map +/// .raw_entry_mut() +/// .from_hash(hash, |q| q == &'f') +/// .or_insert_with(|| ('f', 60)); +/// assert_eq!(kv, (&mut 'f', &mut 60)); +/// assert_eq!(map.len(), 6); +/// +/// println!("Our HashMap: {:?}", map); +/// +/// let mut vec: Vec<_> = map.iter().map(|(&k, &v)| (k, v)).collect(); +/// // The `Iter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50), ('f', 60)]); +/// ``` +pub enum RawEntryMut<'a, K, V, S, A: Allocator = Global> { + /// An occupied entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::{hash_map::RawEntryMut, HashMap}; + /// let mut map: HashMap<_, _> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(_) => { } + /// } + /// ``` + Occupied(RawOccupiedEntryMut<'a, K, V, S, A>), + /// A vacant entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::{hash_map::RawEntryMut, HashMap}; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// + /// match map.raw_entry_mut().from_key("a") { + /// RawEntryMut::Occupied(_) => unreachable!(), + /// RawEntryMut::Vacant(_) => { } + /// } + /// ``` + Vacant(RawVacantEntryMut<'a, K, V, S, A>), +} + +/// A view into an occupied entry in a `HashMap`. +/// It is part of the [`RawEntryMut`] enum. 
+/// +/// [`RawEntryMut`]: enum.RawEntryMut.html +/// +/// # Examples +/// +/// ``` +/// use core::hash::{BuildHasher, Hash}; +/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawOccupiedEntryMut}; +/// +/// let mut map = HashMap::new(); +/// map.extend([("a", 10), ("b", 20), ("c", 30)]); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// let _raw_o: RawOccupiedEntryMut<_, _, _> = map.raw_entry_mut().from_key(&"a").insert("a", 100); +/// assert_eq!(map.len(), 3); +/// +/// // Existing key (insert and update) +/// match map.raw_entry_mut().from_key(&"a") { +/// RawEntryMut::Vacant(_) => unreachable!(), +/// RawEntryMut::Occupied(mut view) => { +/// assert_eq!(view.get(), &100); +/// let v = view.get_mut(); +/// let new_v = (*v) * 10; +/// *v = new_v; +/// assert_eq!(view.insert(1111), 1000); +/// } +/// } +/// +/// assert_eq!(map[&"a"], 1111); +/// assert_eq!(map.len(), 3); +/// +/// // Existing key (take) +/// let hash = compute_hash(map.hasher(), &"c"); +/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"c") { +/// RawEntryMut::Vacant(_) => unreachable!(), +/// RawEntryMut::Occupied(view) => { +/// assert_eq!(view.remove_entry(), ("c", 30)); +/// } +/// } +/// assert_eq!(map.raw_entry().from_key(&"c"), None); +/// assert_eq!(map.len(), 2); +/// +/// let hash = compute_hash(map.hasher(), &"b"); +/// match map.raw_entry_mut().from_hash(hash, |q| *q == "b") { +/// RawEntryMut::Vacant(_) => unreachable!(), +/// RawEntryMut::Occupied(view) => { +/// assert_eq!(view.remove_entry(), ("b", 20)); +/// } +/// } +/// assert_eq!(map.get(&"b"), None); +/// assert_eq!(map.len(), 1); +/// ``` +pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator = Global> { + elem: Bucket<(K, V)>, + table: &'a mut RawTable<(K, V), A>, + hash_builder: &'a S, +} + +unsafe impl Send for RawOccupiedEntryMut<'_, K, V, S, A> +where + K: Send, + V: Send, + S: Send, + A: Send + Allocator, +{ +} +unsafe impl Sync for RawOccupiedEntryMut<'_, K, V, S, A> +where + K: Sync, + V: Sync, + S: Sync, + A: Sync + Allocator, +{ +} + +/// A view into a vacant entry in a `HashMap`. +/// It is part of the [`RawEntryMut`] enum. 
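`RawOccupiedEntryMut` above holds a raw `Bucket` pointer, so `Send` and `Sync` are opted back in manually, and only when the key, value, hasher and allocator are themselves thread-safe. A minimal sketch of that conditional-impl pattern on a hypothetical wrapper type:

#[allow(dead_code)]
struct Wrapper<T> {
    inner: *mut T, // a raw pointer suppresses the automatic Send/Sync impls
}

// SAFETY (for this sketch only): Wrapper exposes nothing beyond what a
// &mut T would, so it can inherit T's thread-safety.
unsafe impl<T: Send> Send for Wrapper<T> {}
unsafe impl<T: Sync> Sync for Wrapper<T> {}

fn assert_send<T: Send>() {}

fn main() {
    assert_send::<Wrapper<u32>>(); // fine: u32 is Send
    // assert_send::<Wrapper<std::rc::Rc<u32>>>(); // rejected: Rc is !Send
}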
+/// +/// [`RawEntryMut`]: enum.RawEntryMut.html +/// +/// # Examples +/// +/// ``` +/// use core::hash::{BuildHasher, Hash}; +/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawVacantEntryMut}; +/// +/// let mut map = HashMap::<&str, i32>::new(); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// let raw_v: RawVacantEntryMut<_, _, _> = match map.raw_entry_mut().from_key(&"a") { +/// RawEntryMut::Vacant(view) => view, +/// RawEntryMut::Occupied(_) => unreachable!(), +/// }; +/// raw_v.insert("a", 10); +/// assert!(map[&"a"] == 10 && map.len() == 1); +/// +/// // Nonexistent key (insert and update) +/// let hash = compute_hash(map.hasher(), &"b"); +/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"b") { +/// RawEntryMut::Occupied(_) => unreachable!(), +/// RawEntryMut::Vacant(view) => { +/// let (k, value) = view.insert("b", 2); +/// assert_eq!((*k, *value), ("b", 2)); +/// *value = 20; +/// } +/// } +/// assert!(map[&"b"] == 20 && map.len() == 2); +/// +/// let hash = compute_hash(map.hasher(), &"c"); +/// match map.raw_entry_mut().from_hash(hash, |q| *q == "c") { +/// RawEntryMut::Occupied(_) => unreachable!(), +/// RawEntryMut::Vacant(view) => { +/// assert_eq!(view.insert("c", 30), (&mut "c", &mut 30)); +/// } +/// } +/// assert!(map[&"c"] == 30 && map.len() == 3); +/// ``` +pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator = Global> { + table: &'a mut RawTable<(K, V), A>, + hash_builder: &'a S, +} + +/// A builder for computing where in a [`HashMap`] a key-value pair would be stored. +/// +/// See the [`HashMap::raw_entry`] docs for usage examples. +/// +/// [`HashMap::raw_entry`]: struct.HashMap.html#method.raw_entry +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{HashMap, RawEntryBuilder}; +/// use core::hash::{BuildHasher, Hash}; +/// +/// let mut map = HashMap::new(); +/// map.extend([(1, 10), (2, 20), (3, 30)]); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// for k in 0..6 { +/// let hash = compute_hash(map.hasher(), &k); +/// let v = map.get(&k).cloned(); +/// let kv = v.as_ref().map(|v| (&k, v)); +/// +/// println!("Key: {} and value: {:?}", k, v); +/// let builder: RawEntryBuilder<_, _, _> = map.raw_entry(); +/// assert_eq!(builder.from_key(&k), kv); +/// assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); +/// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); +/// } +/// ``` +pub struct RawEntryBuilder<'a, K, V, S, A: Allocator = Global> { + map: &'a HashMap, +} + +impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { + /// Creates a `RawEntryMut` from the given key. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let key = "a"; + /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_key(&key); + /// entry.insert(key, 100); + /// assert_eq!(map[&"a"], 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_key(self, k: &Q) -> RawEntryMut<'a, K, V, S, A> + where + S: BuildHasher, + Q: Hash + Equivalent + ?Sized, + { + let hash = make_hash::(&self.map.hash_builder, k); + self.from_key_hashed_nocheck(hash, k) + } + + /// Creates a `RawEntryMut` from the given key and its hash. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let key = "a"; + /// let hash = compute_hash(map.hasher(), &key); + /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_key_hashed_nocheck(hash, &key); + /// entry.insert(key, 100); + /// assert_eq!(map[&"a"], 100); + /// ``` + #[inline] + #[allow(clippy::wrong_self_convention)] + pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S, A> + where + Q: Equivalent + ?Sized, + { + self.from_hash(hash, equivalent(k)) + } +} + +impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { + /// Creates a `RawEntryMut` from the given hash and matching function. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let key = "a"; + /// let hash = compute_hash(map.hasher(), &key); + /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_hash(hash, |k| k == &key); + /// entry.insert(key, 100); + /// assert_eq!(map[&"a"], 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_hash(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S, A> + where + for<'b> F: FnMut(&'b K) -> bool, + { + self.search(hash, is_match) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn search(self, hash: u64, mut is_match: F) -> RawEntryMut<'a, K, V, S, A> + where + for<'b> F: FnMut(&'b K) -> bool, + { + match self.map.table.find(hash, |(k, _)| is_match(k)) { + Some(elem) => RawEntryMut::Occupied(RawOccupiedEntryMut { + elem, + table: &mut self.map.table, + hash_builder: &self.map.hash_builder, + }), + None => RawEntryMut::Vacant(RawVacantEntryMut { + table: &mut self.map.table, + hash_builder: &self.map.hash_builder, + }), + } + } +} + +impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> { + /// Access an immutable entry by key. 
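`from_key` above simply computes the hash with `make_hash` and forwards to `from_key_hashed_nocheck`, which wraps the borrowed key in an equality closure for `from_hash`/`search`. A dependency-free sketch of that layering over a plain slice of (key, hash) pairs (all names here are illustrative):

use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

fn make_hash<S: BuildHasher, K: Hash + ?Sized>(s: &S, k: &K) -> u64 {
    let mut h = s.build_hasher();
    k.hash(&mut h);
    h.finish()
}

// The analogue of `equivalent(k)`: turn a borrowed lookup key into a
// predicate over stored keys.
fn equivalent<K: PartialEq + ?Sized>(k: &K) -> impl Fn(&K) -> bool + '_ {
    move |stored| stored == k
}

// Step 1: hash the key, then delegate (the shape of `from_key`).
fn find_from_key<'t, K: Hash + PartialEq, S: BuildHasher>(
    table: &'t [(K, u64)],
    s: &S,
    k: &K,
) -> Option<&'t K> {
    let hash = make_hash(s, k);
    find_from_hash(table, hash, equivalent(k)) // `from_key_hashed_nocheck`
}

// Step 2: the hash narrows candidates, the closure decides (`from_hash`).
fn find_from_hash<'t, K>(
    table: &'t [(K, u64)],
    hash: u64,
    is_match: impl Fn(&K) -> bool,
) -> Option<&'t K> {
    for (k, h) in table {
        if *h == hash && is_match(k) {
            return Some(k);
        }
    }
    None
}

fn main() {
    let s = RandomState::new();
    let table: Vec<(&str, u64)> = ["a", "b", "c"]
        .into_iter()
        .map(|k| (k, make_hash(&s, k)))
        .collect();
    assert_eq!(find_from_key(&table, &s, &"b"), Some(&"b"));
    assert_eq!(find_from_key(&table, &s, &"z"), None);
}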
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// let key = "a"; + /// assert_eq!(map.raw_entry().from_key(&key), Some((&"a", &100))); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_key(self, k: &Q) -> Option<(&'a K, &'a V)> + where + S: BuildHasher, + Q: Hash + Equivalent + ?Sized, + { + let hash = make_hash::(&self.map.hash_builder, k); + self.from_key_hashed_nocheck(hash, k) + } + + /// Access an immutable entry by a key and its hash. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::HashMap; + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// let key = "a"; + /// let hash = compute_hash(map.hasher(), &key); + /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &key), Some((&"a", &100))); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)> + where + Q: Equivalent + ?Sized, + { + self.from_hash(hash, equivalent(k)) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn search(self, hash: u64, mut is_match: F) -> Option<(&'a K, &'a V)> + where + F: FnMut(&K) -> bool, + { + match self.map.table.get(hash, |(k, _)| is_match(k)) { + Some((key, value)) => Some((key, value)), + None => None, + } + } + + /// Access an immutable entry by hash and matching function. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::HashMap; + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// let key = "a"; + /// let hash = compute_hash(map.hasher(), &key); + /// assert_eq!(map.raw_entry().from_hash(hash, |k| k == &key), Some((&"a", &100))); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_hash(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)> + where + F: FnMut(&K) -> bool, + { + self.search(hash, is_match) + } +} + +impl<'a, K, V, S, A: Allocator> RawEntryMut<'a, K, V, S, A> { + /// Sets the value of the entry, and returns a `RawOccupiedEntryMut`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let entry = map.raw_entry_mut().from_key("horseyland").insert("horseyland", 37); + /// + /// assert_eq!(entry.remove_entry(), ("horseyland", 37)); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, key: K, value: V) -> RawOccupiedEntryMut<'a, K, V, S, A> + where + K: Hash, + S: BuildHasher, + { + match self { + RawEntryMut::Occupied(mut entry) => { + entry.insert(value); + entry + } + RawEntryMut::Vacant(entry) => entry.insert_entry(key, value), + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// mutable references to the key and value in the entry. 
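One of the motivating uses listed above is hash memoization: compute the hash once, keep it next to the key, and reuse it through `from_key_hashed_nocheck` (and, on the vacant side, `insert_hashed_nocheck`, defined later in this file). A sketch, assuming a `hashbrown` dependency with its raw-entry API enabled (a build-configuration assumption, not something this patch guarantees):

use core::hash::{BuildHasher, Hash, Hasher};
use hashbrown::hash_map::{HashMap, RawEntryMut};

fn hash_one<S: BuildHasher, K: Hash + ?Sized>(s: &S, k: &K) -> u64 {
    let mut st = s.build_hasher();
    k.hash(&mut st);
    st.finish()
}

fn main() {
    let mut map: HashMap<String, u32> = HashMap::new();
    let key = String::from("expensive-to-hash-key");
    let hash = hash_one(map.hasher(), &key); // hashed exactly once

    match map.raw_entry_mut().from_key_hashed_nocheck(hash, &key) {
        RawEntryMut::Occupied(mut o) => {
            *o.get_mut() += 1;
        }
        RawEntryMut::Vacant(v) => {
            v.insert_hashed_nocheck(hash, key.clone(), 1);
        }
    }

    // The memoized hash can be reused on the read path as well.
    assert_eq!(
        map.raw_entry().from_key_hashed_nocheck(hash, &key),
        Some((&key, &1))
    );
}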
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 3); + /// assert_eq!(map["poneyland"], 3); + /// + /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 10).1 *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + match self { + RawEntryMut::Occupied(entry) => entry.into_key_value(), + RawEntryMut::Vacant(entry) => entry.insert(default_key, default_val), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns mutable references to the key and value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, String> = HashMap::new(); + /// + /// map.raw_entry_mut().from_key("poneyland").or_insert_with(|| { + /// ("poneyland", "hoho".to_string()) + /// }); + /// + /// assert_eq!(map["poneyland"], "hoho".to_string()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with(self, default: F) -> (&'a mut K, &'a mut V) + where + F: FnOnce() -> (K, V), + K: Hash, + S: BuildHasher, + { + match self { + RawEntryMut::Occupied(entry) => entry.into_key_value(), + RawEntryMut::Vacant(entry) => { + let (k, v) = default(); + entry.insert(k, v) + } + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.raw_entry_mut() + /// .from_key("poneyland") + /// .and_modify(|_k, v| { *v += 1 }) + /// .or_insert("poneyland", 42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.raw_entry_mut() + /// .from_key("poneyland") + /// .and_modify(|_k, v| { *v += 1 }) + /// .or_insert("poneyland", 0); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_modify(self, f: F) -> Self + where + F: FnOnce(&mut K, &mut V), + { + match self { + RawEntryMut::Occupied(mut entry) => { + { + let (k, v) = entry.get_key_value_mut(); + f(k, v); + } + RawEntryMut::Occupied(entry) + } + RawEntryMut::Vacant(entry) => RawEntryMut::Vacant(entry), + } + } + + /// Provides shared access to the key and owned access to the value of + /// an occupied entry and allows to replace or remove it based on the + /// value of the returned option. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RawEntryMut; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// let entry = map + /// .raw_entry_mut() + /// .from_key("poneyland") + /// .and_replace_entry_with(|_k, _v| panic!()); + /// + /// match entry { + /// RawEntryMut::Vacant(_) => {}, + /// RawEntryMut::Occupied(_) => panic!(), + /// } + /// + /// map.insert("poneyland", 42); + /// + /// let entry = map + /// .raw_entry_mut() + /// .from_key("poneyland") + /// .and_replace_entry_with(|k, v| { + /// assert_eq!(k, &"poneyland"); + /// assert_eq!(v, 42); + /// Some(v + 1) + /// }); + /// + /// match entry { + /// RawEntryMut::Occupied(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// assert_eq!(e.get(), &43); + /// }, + /// RawEntryMut::Vacant(_) => panic!(), + /// } + /// + /// assert_eq!(map["poneyland"], 43); + /// + /// let entry = map + /// .raw_entry_mut() + /// .from_key("poneyland") + /// .and_replace_entry_with(|_k, _v| None); + /// + /// match entry { + /// RawEntryMut::Vacant(_) => {}, + /// RawEntryMut::Occupied(_) => panic!(), + /// } + /// + /// assert!(!map.contains_key("poneyland")); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_replace_entry_with(self, f: F) -> Self + where + F: FnOnce(&K, V) -> Option, + { + match self { + RawEntryMut::Occupied(entry) => entry.replace_entry_with(f), + RawEntryMut::Vacant(_) => self, + } + } +} + +impl<'a, K, V, S, A: Allocator> RawOccupiedEntryMut<'a, K, V, S, A> { + /// Gets a reference to the key in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => assert_eq!(o.key(), &"a") + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + unsafe { &self.elem.as_ref().0 } + } + + /// Gets a mutable reference to the key in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// use std::rc::Rc; + /// + /// let key_one = Rc::new("a"); + /// let key_two = Rc::new("a"); + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// map.insert(key_one.clone(), 10); + /// + /// assert_eq!(map[&key_one], 10); + /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); + /// + /// match map.raw_entry_mut().from_key(&key_one) { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(mut o) => { + /// *o.key_mut() = key_two.clone(); + /// } + /// } + /// assert_eq!(map[&key_two], 10); + /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key_mut(&mut self) -> &mut K { + unsafe { &mut self.elem.as_mut().0 } + } + + /// Converts the entry into a mutable reference to the key in the entry + /// with a lifetime bound to the map itself. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// use std::rc::Rc; + /// + /// let key_one = Rc::new("a"); + /// let key_two = Rc::new("a"); + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// map.insert(key_one.clone(), 10); + /// + /// assert_eq!(map[&key_one], 10); + /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); + /// + /// let inside_key: &mut Rc<&str>; + /// + /// match map.raw_entry_mut().from_key(&key_one) { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => inside_key = o.into_key(), + /// } + /// *inside_key = key_two.clone(); + /// + /// assert_eq!(map[&key_two], 10); + /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_key(self) -> &'a mut K { + unsafe { &mut self.elem.as_mut().0 } + } + + /// Gets a reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => assert_eq!(o.get(), &100), + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &V { + unsafe { &self.elem.as_ref().1 } + } + + /// Converts the `OccupiedEntry` into a mutable reference to the value in the entry + /// with a lifetime bound to the map itself. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// let value: &mut u32; + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => value = o.into_mut(), + /// } + /// *value += 900; + /// + /// assert_eq!(map[&"a"], 1000); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_mut(self) -> &'a mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Gets a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(mut o) => *o.get_mut() += 900, + /// } + /// + /// assert_eq!(map[&"a"], 1000); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self) -> &mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Gets a reference to the key and value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => assert_eq!(o.get_key_value(), (&"a", &100)), + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_key_value(&self) -> (&K, &V) { + unsafe { + let (key, value) = self.elem.as_ref(); + (key, value) + } + } + + /// Gets a mutable reference to the key and value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// use std::rc::Rc; + /// + /// let key_one = Rc::new("a"); + /// let key_two = Rc::new("a"); + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// map.insert(key_one.clone(), 10); + /// + /// assert_eq!(map[&key_one], 10); + /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); + /// + /// match map.raw_entry_mut().from_key(&key_one) { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(mut o) => { + /// let (inside_key, inside_value) = o.get_key_value_mut(); + /// *inside_key = key_two.clone(); + /// *inside_value = 100; + /// } + /// } + /// assert_eq!(map[&key_two], 100); + /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) { + unsafe { + let &mut (ref mut key, ref mut value) = self.elem.as_mut(); + (key, value) + } + } + + /// Converts the `OccupiedEntry` into a mutable reference to the key and value in the entry + /// with a lifetime bound to the map itself. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// use std::rc::Rc; + /// + /// let key_one = Rc::new("a"); + /// let key_two = Rc::new("a"); + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// map.insert(key_one.clone(), 10); + /// + /// assert_eq!(map[&key_one], 10); + /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); + /// + /// let inside_key: &mut Rc<&str>; + /// let inside_value: &mut u32; + /// match map.raw_entry_mut().from_key(&key_one) { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => { + /// let tuple = o.into_key_value(); + /// inside_key = tuple.0; + /// inside_value = tuple.1; + /// } + /// } + /// *inside_key = key_two.clone(); + /// *inside_value = 100; + /// assert_eq!(map[&key_two], 100); + /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_key_value(self) -> (&'a mut K, &'a mut V) { + unsafe { + let &mut (ref mut key, ref mut value) = self.elem.as_mut(); + (key, value) + } + } + + /// Sets the value of the entry, and returns the entry's old value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(mut o) => assert_eq!(o.insert(1000), 100), + /// } + /// + /// assert_eq!(map[&"a"], 1000); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + /// Sets the value of the entry, and returns the entry's old value. 
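Both `insert` above and `insert_key` that follows are one-liners over `core::mem::replace`: write the new value into the slot the entry points at and hand the previous value back. The primitive itself, in isolation:

use core::mem;

fn main() {
    let mut value = 100;
    let old = mem::replace(&mut value, 1000);
    assert_eq!((old, value), (100, 1000));
}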
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// use std::rc::Rc; + /// + /// let key_one = Rc::new("a"); + /// let key_two = Rc::new("a"); + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// map.insert(key_one.clone(), 10); + /// + /// assert_eq!(map[&key_one], 10); + /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1); + /// + /// match map.raw_entry_mut().from_key(&key_one) { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(mut o) => { + /// let old_key = o.insert_key(key_two.clone()); + /// assert!(Rc::ptr_eq(&old_key, &key_one)); + /// } + /// } + /// assert_eq!(map[&key_two], 10); + /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_key(&mut self, key: K) -> K { + mem::replace(self.key_mut(), key) + } + + /// Takes the value out of the entry, and returns it. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => assert_eq!(o.remove(), 100), + /// } + /// assert_eq!(map.get(&"a"), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(self) -> V { + self.remove_entry().1 + } + + /// Take the ownership of the key and value from the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => assert_eq!(o.remove_entry(), ("a", 100)), + /// } + /// assert_eq!(map.get(&"a"), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(self) -> (K, V) { + unsafe { self.table.remove(self.elem).0 } + } + + /// Provides shared access to the key and owned access to the value of + /// the entry and allows to replace or remove it based on the + /// value of the returned option. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// let raw_entry = match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => o.replace_entry_with(|k, v| { + /// assert_eq!(k, &"a"); + /// assert_eq!(v, 100); + /// Some(v + 900) + /// }), + /// }; + /// let raw_entry = match raw_entry { + /// RawEntryMut::Vacant(_) => panic!(), + /// RawEntryMut::Occupied(o) => o.replace_entry_with(|k, v| { + /// assert_eq!(k, &"a"); + /// assert_eq!(v, 1000); + /// None + /// }), + /// }; + /// match raw_entry { + /// RawEntryMut::Vacant(_) => { }, + /// RawEntryMut::Occupied(_) => panic!(), + /// }; + /// assert_eq!(map.get(&"a"), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace_entry_with(self, f: F) -> RawEntryMut<'a, K, V, S, A> + where + F: FnOnce(&K, V) -> Option, + { + unsafe { + let still_occupied = self + .table + .replace_bucket_with(self.elem.clone(), |(key, value)| { + f(&key, value).map(|new_value| (key, new_value)) + }); + + if still_occupied { + RawEntryMut::Occupied(self) + } else { + RawEntryMut::Vacant(RawVacantEntryMut { + table: self.table, + hash_builder: self.hash_builder, + }) + } + } + } +} + +impl<'a, K, V, S, A: Allocator> RawVacantEntryMut<'a, K, V, S, A> { + /// Sets the value of the entry with the `VacantEntry`'s key, + /// and returns a mutable reference to it. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"c") { + /// RawEntryMut::Occupied(_) => panic!(), + /// RawEntryMut::Vacant(v) => assert_eq!(v.insert("c", 300), (&mut "c", &mut 300)), + /// } + /// + /// assert_eq!(map[&"c"], 300); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + let hash = make_hash::(self.hash_builder, &key); + self.insert_hashed_nocheck(hash, key, value) + } + + /// Sets the value of the entry with the `VacantEntry`'s key, + /// and returns a mutable reference to it. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into(); + /// let key = "c"; + /// let hash = compute_hash(map.hasher(), &key); + /// + /// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &key) { + /// RawEntryMut::Occupied(_) => panic!(), + /// RawEntryMut::Vacant(v) => assert_eq!( + /// v.insert_hashed_nocheck(hash, key, 300), + /// (&mut "c", &mut 300) + /// ), + /// } + /// + /// assert_eq!(map[&"c"], 300); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::shadow_unrelated)] + pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + let &mut (ref mut k, ref mut v) = self.table.insert_entry( + hash, + (key, value), + make_hasher::<_, V, S>(self.hash_builder), + ); + (k, v) + } + + /// Set the value of an entry with a custom hasher function. 
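`replace_entry_with` above hands the closure the key by reference and the value by move, then keeps or removes the bucket depending on whether the closure returns Some or None (note the `.map(|new_value| (key, new_value))` adapter). The same contract on a plain Option slot, as a dependency-free sketch:

fn replace_with<K, V>(slot: &mut Option<(K, V)>, f: impl FnOnce(&K, V) -> Option<V>) {
    if let Some((k, v)) = slot.take() {
        // Keep the key, let the closure decide the fate of the value.
        *slot = f(&k, v).map(|new_v| (k, new_v));
    }
}

fn main() {
    let mut slot = Some(("poneyland", 42));
    replace_with(&mut slot, |_k, v| Some(v + 1)); // keep, with a new value
    assert_eq!(slot, Some(("poneyland", 43)));
    replace_with(&mut slot, |_k, _v| None); // remove the entry
    assert_eq!(slot, None);
}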
+ /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// fn make_hasher(hash_builder: &S) -> impl Fn(&K) -> u64 + '_ + /// where + /// K: Hash + ?Sized, + /// S: BuildHasher, + /// { + /// move |key: &K| { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// } + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let key = "a"; + /// let hash_builder = map.hasher().clone(); + /// let hash = make_hasher(&hash_builder)(&key); + /// + /// match map.raw_entry_mut().from_hash(hash, |q| q == &key) { + /// RawEntryMut::Occupied(_) => panic!(), + /// RawEntryMut::Vacant(v) => assert_eq!( + /// v.insert_with_hasher(hash, key, 100, make_hasher(&hash_builder)), + /// (&mut "a", &mut 100) + /// ), + /// } + /// map.extend([("b", 200), ("c", 300), ("d", 400), ("e", 500), ("f", 600)]); + /// assert_eq!(map[&"a"], 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_with_hasher( + self, + hash: u64, + key: K, + value: V, + hasher: H, + ) -> (&'a mut K, &'a mut V) + where + H: Fn(&K) -> u64, + { + let &mut (ref mut k, ref mut v) = self + .table + .insert_entry(hash, (key, value), |x| hasher(&x.0)); + (k, v) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn insert_entry(self, key: K, value: V) -> RawOccupiedEntryMut<'a, K, V, S, A> + where + K: Hash, + S: BuildHasher, + { + let hash = make_hash::(self.hash_builder, &key); + let elem = self.table.insert( + hash, + (key, value), + make_hasher::<_, V, S>(self.hash_builder), + ); + RawOccupiedEntryMut { + elem, + table: self.table, + hash_builder: self.hash_builder, + } + } +} + +impl Debug for RawEntryBuilderMut<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawEntryBuilder").finish() + } +} + +impl Debug for RawEntryMut<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(), + RawEntryMut::Occupied(ref o) => f.debug_tuple("RawEntry").field(o).finish(), + } + } +} + +impl Debug for RawOccupiedEntryMut<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawOccupiedEntryMut") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +impl Debug for RawVacantEntryMut<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawVacantEntryMut").finish() + } +} + +impl Debug for RawEntryBuilder<'_, K, V, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawEntryBuilder").finish() + } +} + +#[cfg(test)] +mod test_map { + use super::HashMap; + use super::RawEntryMut; + + #[test] + fn test_raw_occupied_entry_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a + .raw_entry_mut() + .from_key(&key) + .insert(key, value) + .replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + RawEntryMut::Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + RawEntryMut::Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = match a.raw_entry_mut().from_key(&key) { + RawEntryMut::Occupied(e) => 
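`insert_with_hasher` above accepts a caller-supplied `hasher: H` and adapts it to the stored `(K, V)` tuples with `|x| hasher(&x.0)`; the tests further down use exactly this to key a table by values that carry their own hash. A dependency-free sketch of that adaptation (names are illustrative):

fn adapt<K, V>(hasher: impl Fn(&K) -> u64) -> impl Fn(&(K, V)) -> u64 {
    // The table stores (key, value) pairs, but hashing only ever looks at
    // the key, so the closure projects out `.0` before delegating.
    move |entry| hasher(&entry.0)
}

fn main() {
    // As in the IntWrapper test below, the "hasher" just reads back a hash
    // the key already carries.
    let key_hasher = |k: &u64| *k;
    let tuple_hasher = adapt::<u64, &str>(key_hasher);
    assert_eq!(tuple_hasher(&(7, "seven")), 7);
}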
e.replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }), + RawEntryMut::Vacant(_) => panic!(), + }; + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_raw_entry_and_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|_, _| panic!()); + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } + + a.insert(key, value); + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + RawEntryMut::Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + RawEntryMut::Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }); + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_raw_entry() { + use super::RawEntryMut::{Occupied, Vacant}; + + let xs = [(1_i32, 10_i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; + + let mut map: HashMap<_, _> = xs.iter().copied().collect(); + + let compute_hash = |map: &HashMap, k: i32| -> u64 { + super::make_hash::(map.hasher(), &k) + }; + + // Existing key (insert) + match map.raw_entry_mut().from_key(&1) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + assert_eq!(view.get(), &10); + assert_eq!(view.insert(100), 10); + } + } + let hash1 = compute_hash(&map, 1); + assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100)); + assert_eq!( + map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), + (&1, &100) + ); + assert_eq!( + map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), + (&1, &100) + ); + assert_eq!(map.len(), 6); + + // Existing key (update) + match map.raw_entry_mut().from_key(&2) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + let v = view.get_mut(); + let new_v = (*v) * 10; + *v = new_v; + } + } + let hash2 = compute_hash(&map, 2); + assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200)); + assert_eq!( + map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), + (&2, &200) + ); + assert_eq!( + map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), + (&2, &200) + ); + assert_eq!(map.len(), 6); + + // Existing key (take) + let hash3 = compute_hash(&map, 3); + match map.raw_entry_mut().from_key_hashed_nocheck(hash3, &3) { + Vacant(_) => unreachable!(), + Occupied(view) => { + assert_eq!(view.remove_entry(), (3, 30)); + } + } + assert_eq!(map.raw_entry().from_key(&3), None); + assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None); + assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None); + assert_eq!(map.len(), 5); + + // Nonexistent key (insert) + match map.raw_entry_mut().from_key(&10) { + Occupied(_) => unreachable!(), + Vacant(view) => { + assert_eq!(view.insert(10, 1000), (&mut 10, &mut 1000)); + } + } + assert_eq!(map.raw_entry().from_key(&10).unwrap(), (&10, &1000)); + assert_eq!(map.len(), 6); + + // Ensure all lookup methods produce equivalent results. 
+ for k in 0..12 { + let hash = compute_hash(&map, k); + let v = map.get(&k).copied(); + let kv = v.as_ref().map(|v| (&k, v)); + + assert_eq!(map.raw_entry().from_key(&k), kv); + assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); + assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); + + match map.raw_entry_mut().from_key(&k) { + Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + match map.raw_entry_mut().from_key_hashed_nocheck(hash, &k) { + Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + match map.raw_entry_mut().from_hash(hash, |q| *q == k) { + Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + } + } + + #[test] + fn test_key_without_hash_impl() { + #[derive(Debug)] + struct IntWrapper(u64); + + let mut m: HashMap = HashMap::default(); + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_none()); + } + { + let vacant_entry = match m.raw_entry_mut().from_hash(0, |k| k.0 == 0) { + RawEntryMut::Occupied(..) => panic!("Found entry for key 0"), + RawEntryMut::Vacant(e) => e, + }; + vacant_entry.insert_with_hasher(0, IntWrapper(0), (), |k| k.0); + } + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_some()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_none()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } + { + let vacant_entry = match m.raw_entry_mut().from_hash(1, |k| k.0 == 1) { + RawEntryMut::Occupied(..) => panic!("Found entry for key 1"), + RawEntryMut::Vacant(e) => e, + }; + vacant_entry.insert_with_hasher(1, IntWrapper(1), (), |k| k.0); + } + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_some()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_some()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } + { + let occupied_entry = match m.raw_entry_mut().from_hash(0, |k| k.0 == 0) { + RawEntryMut::Occupied(e) => e, + RawEntryMut::Vacant(..) => panic!("Couldn't find entry for key 0"), + }; + occupied_entry.remove(); + } + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_none()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_some()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/rustc_entry.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/rustc_entry.rs new file mode 100644 index 000000000000..cb48be0e4486 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/rustc_entry.rs @@ -0,0 +1,567 @@ +use self::RustcEntry::*; +use crate::map::{make_hash, Drain, HashMap, IntoIter, Iter, IterMut}; +use crate::raw::{Allocator, Bucket, Global, RawTable}; +use core::fmt::{self, Debug}; +use core::hash::{BuildHasher, Hash}; +use core::mem; + +impl HashMap +where + K: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + /// Gets the given key's corresponding entry in the map for in-place manipulation. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut letters = HashMap::new(); + /// + /// for ch in "a short treatise on fungi".chars() { + /// let counter = letters.rustc_entry(ch).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(letters[&'s'], 2); + /// assert_eq!(letters[&'t'], 3); + /// assert_eq!(letters[&'u'], 1); + /// assert_eq!(letters.get(&'y'), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> { + let hash = make_hash(&self.hash_builder, &key); + if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) { + RustcEntry::Occupied(RustcOccupiedEntry { + elem, + table: &mut self.table, + }) + } else { + // Ideally we would put this in VacantEntry::insert, but Entry is not + // generic over the BuildHasher and adding a generic parameter would be + // a breaking change. + self.reserve(1); + + RustcEntry::Vacant(RustcVacantEntry { + hash, + key, + table: &mut self.table, + }) + } + } +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This `enum` is constructed from the [`rustc_entry`] method on [`HashMap`]. +/// +/// [`HashMap`]: struct.HashMap.html +/// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry +pub enum RustcEntry<'a, K, V, A = Global> +where + A: Allocator, +{ + /// An occupied entry. + Occupied(RustcOccupiedEntry<'a, K, V, A>), + + /// A vacant entry. + Vacant(RustcVacantEntry<'a, K, V, A>), +} + +impl Debug for RustcEntry<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), + } + } +} + +/// A view into an occupied entry in a `HashMap`. +/// It is part of the [`RustcEntry`] enum. +/// +/// [`RustcEntry`]: enum.RustcEntry.html +pub struct RustcOccupiedEntry<'a, K, V, A = Global> +where + A: Allocator, +{ + elem: Bucket<(K, V)>, + table: &'a mut RawTable<(K, V), A>, +} + +unsafe impl Send for RustcOccupiedEntry<'_, K, V, A> +where + K: Send, + V: Send, + A: Allocator + Send, +{ +} +unsafe impl Sync for RustcOccupiedEntry<'_, K, V, A> +where + K: Sync, + V: Sync, + A: Allocator + Sync, +{ +} + +impl Debug for RustcOccupiedEntry<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +/// A view into a vacant entry in a `HashMap`. +/// It is part of the [`RustcEntry`] enum. +/// +/// [`RustcEntry`]: enum.RustcEntry.html +pub struct RustcVacantEntry<'a, K, V, A = Global> +where + A: Allocator, +{ + hash: u64, + key: K, + table: &'a mut RawTable<(K, V), A>, +} + +impl Debug for RustcVacantEntry<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("VacantEntry").field(self.key()).finish() + } +} + +impl<'a, K, V, A: Allocator> RustcEntry<'a, K, V, A> { + /// Sets the value of the entry, and returns a RustcOccupiedEntry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let entry = map.rustc_entry("horseyland").insert(37); + /// + /// assert_eq!(entry.key(), &"horseyland"); + /// ``` + pub fn insert(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> { + match self { + Vacant(entry) => entry.insert_entry(value), + Occupied(mut entry) => { + entry.insert(value); + entry + } + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.rustc_entry("poneyland").or_insert(3); + /// assert_eq!(map["poneyland"], 3); + /// + /// *map.rustc_entry("poneyland").or_insert(10) *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self, default: V) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, String> = HashMap::new(); + /// let s = "hoho".to_string(); + /// + /// map.rustc_entry("poneyland").or_insert_with(|| s); + /// + /// assert_eq!(map["poneyland"], "hoho".to_string()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with V>(self, default: F) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(default()), + } + } + + /// Returns a reference to this entry's key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + match *self { + Occupied(ref entry) => entry.key(), + Vacant(ref entry) => entry.key(), + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.rustc_entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.rustc_entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_modify(self, f: F) -> Self + where + F: FnOnce(&mut V), + { + match self { + Occupied(mut entry) => { + f(entry.get_mut()); + Occupied(entry) + } + Vacant(entry) => Vacant(entry), + } + } +} + +impl<'a, K, V: Default, A: Allocator> RustcEntry<'a, K, V, A> { + /// Ensures a value is in the entry by inserting the default value if empty, + /// and returns a mutable reference to the value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// # fn main() { + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, Option> = HashMap::new(); + /// map.rustc_entry("poneyland").or_default(); + /// + /// assert_eq!(map["poneyland"], None); + /// # } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_default(self) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(Default::default()), + } + } +} + +impl<'a, K, V, A: Allocator> RustcOccupiedEntry<'a, K, V, A> { + /// Gets a reference to the key in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + unsafe { &self.elem.as_ref().0 } + } + + /// Take the ownership of the key and value from the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// // We delete the entry from the map. + /// o.remove_entry(); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(self) -> (K, V) { + unsafe { self.table.remove(self.elem).0 } + } + + /// Gets a reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.get(), &12); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &V { + unsafe { &self.elem.as_ref().1 } + } + + /// Gets a mutable reference to the value in the entry. + /// + /// If you need a reference to the `RustcOccupiedEntry` which may outlive the + /// destruction of the `RustcEntry` value, see [`into_mut`]. + /// + /// [`into_mut`]: #method.into_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") { + /// *o.get_mut() += 10; + /// assert_eq!(*o.get(), 22); + /// + /// // We can use the same RustcEntry multiple times. + /// *o.get_mut() += 2; + /// } + /// + /// assert_eq!(map["poneyland"], 24); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self) -> &mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Converts the RustcOccupiedEntry into a mutable reference to the value in the entry + /// with a lifetime bound to the map itself. + /// + /// If you need multiple references to the `RustcOccupiedEntry`, see [`get_mut`]. 
+ /// + /// [`get_mut`]: #method.get_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// *o.into_mut() += 10; + /// } + /// + /// assert_eq!(map["poneyland"], 22); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_mut(self) -> &'a mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Sets the value of the entry, and returns the entry's old value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.insert(15), 12); + /// } + /// + /// assert_eq!(map["poneyland"], 15); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + /// Takes the value out of the entry, and returns it. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.remove(), 12); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(self) -> V { + self.remove_entry().1 + } +} + +impl<'a, K, V, A: Allocator> RustcVacantEntry<'a, K, V, A> { + /// Gets a reference to the key that would be used when inserting a value + /// through the `RustcVacantEntry`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + &self.key + } + + /// Take ownership of the key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") { + /// v.into_key(); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_key(self) -> K { + self.key + } + + /// Sets the value of the entry with the RustcVacantEntry's key, + /// and returns a mutable reference to it. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let RustcEntry::Vacant(o) = map.rustc_entry("poneyland") { + /// o.insert(37); + /// } + /// assert_eq!(map["poneyland"], 37); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, value: V) -> &'a mut V { + unsafe { + let bucket = self.table.insert_no_grow(self.hash, (self.key, value)); + &mut bucket.as_mut().1 + } + } + + /// Sets the value of the entry with the RustcVacantEntry's key, + /// and returns a RustcOccupiedEntry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") { + /// let o = v.insert_entry(37); + /// assert_eq!(o.get(), &37); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_entry(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> { + let bucket = unsafe { self.table.insert_no_grow(self.hash, (self.key, value)) }; + RustcOccupiedEntry { + elem: bucket, + table: self.table, + } + } +} + +impl IterMut<'_, K, V> { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_iter(&self) -> Iter<'_, K, V> { + self.iter() + } +} + +impl IntoIter { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_iter(&self) -> Iter<'_, K, V> { + self.iter() + } +} + +impl Drain<'_, K, V> { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_iter(&self) -> Iter<'_, K, V> { + self.iter() + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/scopeguard.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/scopeguard.rs new file mode 100644 index 000000000000..382d06043ef6 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/scopeguard.rs @@ -0,0 +1,72 @@ +// Extracted from the scopeguard crate +use core::{ + mem::ManuallyDrop, + ops::{Deref, DerefMut}, + ptr, +}; + +pub struct ScopeGuard +where + F: FnMut(&mut T), +{ + dropfn: F, + value: T, +} + +#[inline] +pub fn guard(value: T, dropfn: F) -> ScopeGuard +where + F: FnMut(&mut T), +{ + ScopeGuard { dropfn, value } +} + +impl ScopeGuard +where + F: FnMut(&mut T), +{ + #[inline] + pub fn into_inner(guard: Self) -> T { + // Cannot move out of Drop-implementing types, so + // ptr::read the value out of a ManuallyDrop + // Don't use mem::forget as that might invalidate value + let guard = ManuallyDrop::new(guard); + unsafe { + let value = ptr::read(&guard.value); + // read the closure so that it is dropped + let _ = ptr::read(&guard.dropfn); + value + } + } +} + +impl Deref for ScopeGuard +where + F: FnMut(&mut T), +{ + type Target = T; + #[inline] + fn deref(&self) -> &T { + &self.value + } +} + +impl DerefMut for ScopeGuard +where + F: FnMut(&mut T), +{ + #[inline] + fn deref_mut(&mut self) -> &mut T { + &mut self.value + } +} + +impl Drop for ScopeGuard +where + F: FnMut(&mut T), +{ + #[inline] + fn drop(&mut self) { + (self.dropfn)(&mut self.value); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/set.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/set.rs new file mode 100644 index 000000000000..819d01c664ff --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/set.rs @@ -0,0 +1,3106 @@ +use crate::{Equivalent, TryReserveError}; +use core::hash::{BuildHasher, Hash}; +use core::iter::{Chain, FusedIterator}; +use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Sub, SubAssign}; +use core::{fmt, mem}; +use map::make_hash; + +use super::map::{self, HashMap, Keys}; +use crate::raw::{Allocator, Global, RawExtractIf}; +use crate::DefaultHashBuilder; + +// Future Optimization (FIXME!) 
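// A minimal sketch of how the `ScopeGuard` above is meant to be used inside
// the crate: pair a value with a cleanup closure that runs on drop (including
// during unwinding), then defuse the guard with `ScopeGuard::into_inner` once
// the risky work has succeeded. `append_two` is an illustrative name, and the
// sketch assumes `guard`/`ScopeGuard` from the module above are in scope;
// they are crate-internal, not part of hashbrown's public API.
fn append_two(buf: &mut Vec<u32>, make: impl Fn() -> u32) {
    let len = buf.len();
    // If `make()` panics below, the guard truncates the buffer back to its
    // original length while unwinding.
    let mut guarded = guard(buf, move |b| b.truncate(len));
    guarded.push(make()); // Deref/DerefMut expose the wrapped value
    guarded.push(make());
    // Both pushes succeeded: take the value back out so the drop closure
    // never runs.
    ScopeGuard::into_inner(guarded);
}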
+// ============================= +// +// Iteration over zero sized values is a noop. There is no need +// for `bucket.val` in the case of HashSet. I suppose we would need HKT +// to get rid of it properly. + +/// A hash set implemented as a `HashMap` where the value is `()`. +/// +/// As with the [`HashMap`] type, a `HashSet` requires that the elements +/// implement the [`Eq`] and [`Hash`] traits. This can frequently be achieved by +/// using `#[derive(PartialEq, Eq, Hash)]`. If you implement these yourself, +/// it is important that the following property holds: +/// +/// ```text +/// k1 == k2 -> hash(k1) == hash(k2) +/// ``` +/// +/// In other words, if two keys are equal, their hashes must be equal. +/// +/// +/// It is a logic error for an item to be modified in such a way that the +/// item's hash, as determined by the [`Hash`] trait, or its equality, as +/// determined by the [`Eq`] trait, changes while it is in the set. This is +/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or +/// unsafe code. +/// +/// It is also a logic error for the [`Hash`] implementation of a key to panic. +/// This is generally only possible if the trait is implemented manually. If a +/// panic does occur then the contents of the `HashSet` may become corrupted and +/// some items may be dropped from the table. +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashSet; +/// // Type inference lets us omit an explicit type signature (which +/// // would be `HashSet` in this example). +/// let mut books = HashSet::new(); +/// +/// // Add some books. +/// books.insert("A Dance With Dragons".to_string()); +/// books.insert("To Kill a Mockingbird".to_string()); +/// books.insert("The Odyssey".to_string()); +/// books.insert("The Great Gatsby".to_string()); +/// +/// // Check for a specific one. +/// if !books.contains("The Winds of Winter") { +/// println!("We have {} books, but The Winds of Winter ain't one.", +/// books.len()); +/// } +/// +/// // Remove a book. +/// books.remove("The Odyssey"); +/// +/// // Iterate over everything. +/// for book in &books { +/// println!("{}", book); +/// } +/// ``` +/// +/// The easiest way to use `HashSet` with a custom type is to derive +/// [`Eq`] and [`Hash`]. We must also derive [`PartialEq`]. This will in the +/// future be implied by [`Eq`]. +/// +/// ``` +/// use hashbrown::HashSet; +/// #[derive(Hash, Eq, PartialEq, Debug)] +/// struct Viking { +/// name: String, +/// power: usize, +/// } +/// +/// let mut vikings = HashSet::new(); +/// +/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 }); +/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 }); +/// vikings.insert(Viking { name: "Olaf".to_string(), power: 4 }); +/// vikings.insert(Viking { name: "Harald".to_string(), power: 8 }); +/// +/// // Use derived implementation to print the vikings. 
+/// for x in &vikings { +/// println!("{:?}", x); +/// } +/// ``` +/// +/// A `HashSet` with fixed list of elements can be initialized from an array: +/// +/// ``` +/// use hashbrown::HashSet; +/// +/// let viking_names: HashSet<&'static str> = +/// [ "Einar", "Olaf", "Harald" ].into_iter().collect(); +/// // use the values stored in the set +/// ``` +/// +/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html +/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html +/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html +/// [`HashMap`]: struct.HashMap.html +/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html +/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html +pub struct HashSet { + pub(crate) map: HashMap, +} + +impl Clone for HashSet { + fn clone(&self) -> Self { + HashSet { + map: self.map.clone(), + } + } + + fn clone_from(&mut self, source: &Self) { + self.map.clone_from(&source.map); + } +} + +#[cfg(feature = "default-hasher")] +impl HashSet { + /// Creates an empty `HashSet`. + /// + /// The hash set is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`], for example with + /// [`with_hasher`](HashSet::with_hasher) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let set: HashSet = HashSet::new(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn new() -> Self { + Self { + map: HashMap::new(), + } + } + + /// Creates an empty `HashSet` with the specified capacity. + /// + /// The hash set will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash set will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`], for example with + /// [`with_capacity_and_hasher`](HashSet::with_capacity_and_hasher) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let set: HashSet = HashSet::with_capacity(10); + /// assert!(set.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity(capacity: usize) -> Self { + Self { + map: HashMap::with_capacity(capacity), + } + } +} + +#[cfg(feature = "default-hasher")] +impl HashSet { + /// Creates an empty `HashSet`. + /// + /// The hash set is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. 
+ /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`], for example with + /// [`with_hasher_in`](HashSet::with_hasher_in) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let set: HashSet = HashSet::new(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn new_in(alloc: A) -> Self { + Self { + map: HashMap::new_in(alloc), + } + } + + /// Creates an empty `HashSet` with the specified capacity. + /// + /// The hash set will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash set will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`], for example with + /// [`with_capacity_and_hasher_in`](HashSet::with_capacity_and_hasher_in) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let set: HashSet = HashSet::with_capacity(10); + /// assert!(set.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Self { + map: HashMap::with_capacity_in(capacity, alloc), + } + } +} + +impl HashSet { + /// Returns the number of elements the set can hold without reallocating. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let set: HashSet = HashSet::with_capacity(100); + /// assert!(set.capacity() >= 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn capacity(&self) -> usize { + self.map.capacity() + } + + /// An iterator visiting all elements in arbitrary order. + /// The iterator element type is `&'a T`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let mut set = HashSet::new(); + /// set.insert("a"); + /// set.insert("b"); + /// + /// // Will print in an arbitrary order. + /// for x in set.iter() { + /// println!("{}", x); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> Iter<'_, T> { + Iter { + iter: self.map.keys(), + } + } + + /// Returns the number of elements in the set. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut v = HashSet::new(); + /// assert_eq!(v.len(), 0); + /// v.insert(1); + /// assert_eq!(v.len(), 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn len(&self) -> usize { + self.map.len() + } + + /// Returns `true` if the set contains no elements. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut v = HashSet::new(); + /// assert!(v.is_empty()); + /// v.insert(1); + /// assert!(!v.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Clears the set, returning all elements in an iterator. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// assert!(!set.is_empty()); + /// + /// // print 1, 2, 3 in an arbitrary order + /// for i in set.drain() { + /// println!("{}", i); + /// } + /// + /// assert!(set.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> Drain<'_, T, A> { + Drain { + iter: self.map.drain(), + } + } + + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all elements `e` such that `f(&e)` returns `false`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let xs = [1,2,3,4,5,6]; + /// let mut set: HashSet = xs.into_iter().collect(); + /// set.retain(|&k| k % 2 == 0); + /// assert_eq!(set.len(), 3); + /// ``` + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&T) -> bool, + { + self.map.retain(|k, _| f(k)); + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// In other words, move all elements `e` such that `f(&e)` returns `true` out + /// into another iterator. + /// + /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating + /// or the iteration short-circuits, then the remaining elements will be retained. + /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. + /// + /// [`retain()`]: HashSet::retain + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet = (0..8).collect(); + /// let drained: HashSet = set.extract_if(|v| v % 2 == 0).collect(); + /// + /// let mut evens = drained.into_iter().collect::>(); + /// let mut odds = set.into_iter().collect::>(); + /// evens.sort(); + /// odds.sort(); + /// + /// assert_eq!(evens, vec![0, 2, 4, 6]); + /// assert_eq!(odds, vec![1, 3, 5, 7]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, T, F, A> + where + F: FnMut(&T) -> bool, + { + ExtractIf { + f, + inner: RawExtractIf { + iter: unsafe { self.map.table.iter() }, + table: &mut self.map.table, + }, + } + } + + /// Clears the set, removing all values. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut v = HashSet::new(); + /// v.insert(1); + /// v.clear(); + /// assert!(v.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + self.map.clear(); + } +} + +impl HashSet { + /// Creates a new empty hash set which will use the given hasher to hash + /// keys. + /// + /// The hash set is initially created with a capacity of 0, so it will not + /// allocate until it is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`]. 
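// A small sketch of the `extract_if` behaviour documented above: dropping the
// iterator before it is exhausted leaves every element it never yielded in
// the set.
use hashbrown::HashSet;

fn main() {
    let mut set: HashSet<i32> = (0..8).collect();
    {
        let mut evens = set.extract_if(|v| v % 2 == 0);
        // Pull a single matching value, then drop the iterator early.
        assert!(evens.next().is_some());
    }
    // Only the one yielded element was removed; the other seven remain.
    assert_eq!(set.len(), 7);
}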
+ /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the `HashSet` to be useful, see its documentation for details. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut set = HashSet::with_hasher(s); + /// set.insert(2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub const fn with_hasher(hasher: S) -> Self { + Self { + map: HashMap::with_hasher(hasher), + } + } + + /// Creates an empty `HashSet` with the specified capacity, using + /// `hasher` to hash the keys. + /// + /// The hash set will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash set will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`]. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the `HashSet` to be useful, see its documentation for details. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut set = HashSet::with_capacity_and_hasher(10, s); + /// set.insert(1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { + Self { + map: HashMap::with_capacity_and_hasher(capacity, hasher), + } + } +} + +impl HashSet +where + A: Allocator, +{ + /// Returns a reference to the underlying allocator. + #[inline] + pub fn allocator(&self) -> &A { + self.map.allocator() + } + + /// Creates a new empty hash set which will use the given hasher to hash + /// keys. + /// + /// The hash set is initially created with a capacity of 0, so it will not + /// allocate until it is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`]. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the `HashSet` to be useful, see its documentation for details. 
+ /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut set = HashSet::with_hasher(s); + /// set.insert(2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub const fn with_hasher_in(hasher: S, alloc: A) -> Self { + Self { + map: HashMap::with_hasher_in(hasher, alloc), + } + } + + /// Creates an empty `HashSet` with the specified capacity, using + /// `hasher` to hash the keys. + /// + /// The hash set will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash set will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`]. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the `HashSet` to be useful, see its documentation for details. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut set = HashSet::with_capacity_and_hasher(10, s); + /// set.insert(1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_and_hasher_in(capacity: usize, hasher: S, alloc: A) -> Self { + Self { + map: HashMap::with_capacity_and_hasher_in(capacity, hasher, alloc), + } + } + + /// Returns a reference to the set's [`BuildHasher`]. + /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::DefaultHashBuilder; + /// + /// let hasher = DefaultHashBuilder::default(); + /// let set: HashSet = HashSet::with_hasher(hasher); + /// let hasher: &DefaultHashBuilder = set.hasher(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn hasher(&self) -> &S { + self.map.hasher() + } +} + +impl HashSet +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the `HashSet`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`try_reserve`](HashSet::try_reserve) instead + /// if you want to handle memory allocation failure. 
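// Sketch of the HashDoS guidance repeated in the constructor docs above:
// callers hashing untrusted keys can supply std's randomly seeded
// `RandomState` through `with_hasher` instead of relying on the default
// hasher's fixed key.
use hashbrown::HashSet;
use std::collections::hash_map::RandomState;

fn main() {
    let mut names: HashSet<String, RandomState> = HashSet::with_hasher(RandomState::new());
    names.insert("untrusted input".to_string());
    assert!(names.contains("untrusted input"));
}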
+ /// + /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let mut set: HashSet = HashSet::new(); + /// set.reserve(10); + /// assert!(set.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize) { + self.map.reserve(additional); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashSet`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let mut set: HashSet = HashSet::new(); + /// set.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.map.try_reserve(additional) + } + + /// Shrinks the capacity of the set as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::with_capacity(100); + /// set.insert(1); + /// set.insert(2); + /// assert!(set.capacity() >= 100); + /// set.shrink_to_fit(); + /// assert!(set.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to_fit(&mut self) { + self.map.shrink_to_fit(); + } + + /// Shrinks the capacity of the set with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::with_capacity(100); + /// set.insert(1); + /// set.insert(2); + /// assert!(set.capacity() >= 100); + /// set.shrink_to(10); + /// assert!(set.capacity() >= 10); + /// set.shrink_to(0); + /// assert!(set.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.map.shrink_to(min_capacity); + } + + /// Visits the values representing the difference, + /// i.e., the values that are in `self` but not in `other`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let a: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = [4, 2, 3, 4].into_iter().collect(); + /// + /// // Can be seen as `a - b`. 
+ /// for x in a.difference(&b) { + /// println!("{}", x); // Print 1 + /// } + /// + /// let diff: HashSet<_> = a.difference(&b).collect(); + /// assert_eq!(diff, [1].iter().collect()); + /// + /// // Note that difference is not symmetric, + /// // and `b - a` means something else: + /// let diff: HashSet<_> = b.difference(&a).collect(); + /// assert_eq!(diff, [4].iter().collect()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T, S, A> { + Difference { + iter: self.iter(), + other, + } + } + + /// Visits the values representing the symmetric difference, + /// i.e., the values that are in `self` or in `other` but not in both. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let a: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = [4, 2, 3, 4].into_iter().collect(); + /// + /// // Print 1, 4 in arbitrary order. + /// for x in a.symmetric_difference(&b) { + /// println!("{}", x); + /// } + /// + /// let diff1: HashSet<_> = a.symmetric_difference(&b).collect(); + /// let diff2: HashSet<_> = b.symmetric_difference(&a).collect(); + /// + /// assert_eq!(diff1, diff2); + /// assert_eq!(diff1, [1, 4].iter().collect()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn symmetric_difference<'a>(&'a self, other: &'a Self) -> SymmetricDifference<'a, T, S, A> { + SymmetricDifference { + iter: self.difference(other).chain(other.difference(self)), + } + } + + /// Visits the values representing the intersection, + /// i.e., the values that are both in `self` and `other`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let a: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = [4, 2, 3, 4].into_iter().collect(); + /// + /// // Print 2, 3 in arbitrary order. + /// for x in a.intersection(&b) { + /// println!("{}", x); + /// } + /// + /// let intersection: HashSet<_> = a.intersection(&b).collect(); + /// assert_eq!(intersection, [2, 3].iter().collect()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn intersection<'a>(&'a self, other: &'a Self) -> Intersection<'a, T, S, A> { + let (smaller, larger) = if self.len() <= other.len() { + (self, other) + } else { + (other, self) + }; + Intersection { + iter: smaller.iter(), + other: larger, + } + } + + /// Visits the values representing the union, + /// i.e., all the values in `self` or `other`, without duplicates. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let a: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = [4, 2, 3, 4].into_iter().collect(); + /// + /// // Print 1, 2, 3, 4 in arbitrary order. + /// for x in a.union(&b) { + /// println!("{}", x); + /// } + /// + /// let union: HashSet<_> = a.union(&b).collect(); + /// assert_eq!(union, [1, 2, 3, 4].iter().collect()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T, S, A> { + // We'll iterate one set in full, and only the remaining difference from the other. + // Use the smaller set for the difference in order to reduce hash lookups. + let (smaller, larger) = if self.len() <= other.len() { + (self, other) + } else { + (other, self) + }; + Union { + iter: larger.iter().chain(smaller.difference(larger)), + } + } + + /// Returns `true` if the set contains a value. 
+ /// + /// The value may be any borrowed form of the set's value type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the value type. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let set: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// assert_eq!(set.contains(&1), true); + /// assert_eq!(set.contains(&4), false); + /// ``` + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn contains(&self, value: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.map.contains_key(value) + } + + /// Returns a reference to the value in the set, if any, that is equal to the given value. + /// + /// The value may be any borrowed form of the set's value type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the value type. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let set: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// assert_eq!(set.get(&2), Some(&2)); + /// assert_eq!(set.get(&4), None); + /// ``` + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self, value: &Q) -> Option<&T> + where + Q: Hash + Equivalent + ?Sized, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.map.get_key_value(value) { + Some((k, _)) => Some(k), + None => None, + } + } + + /// Inserts the given `value` into the set if it is not present, then + /// returns a reference to the value in the set. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// assert_eq!(set.len(), 3); + /// assert_eq!(set.get_or_insert(2), &2); + /// assert_eq!(set.get_or_insert(100), &100); + /// assert_eq!(set.len(), 4); // 100 was inserted + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_or_insert(&mut self, value: T) -> &T { + let hash = make_hash(&self.map.hash_builder, &value); + let bucket = match self.map.find_or_find_insert_slot(hash, &value) { + Ok(bucket) => bucket, + Err(slot) => unsafe { self.map.table.insert_in_slot(hash, slot, (value, ())) }, + }; + unsafe { &bucket.as_ref().0 } + } + + /// Inserts a value computed from `f` into the set if the given `value` is + /// not present, then returns a reference to the value in the set. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet = ["cat", "dog", "horse"] + /// .iter().map(|&pet| pet.to_owned()).collect(); + /// + /// assert_eq!(set.len(), 3); + /// for &pet in &["cat", "dog", "fish"] { + /// let value = set.get_or_insert_with(pet, str::to_owned); + /// assert_eq!(value, pet); + /// } + /// assert_eq!(set.len(), 4); // a new "fish" was inserted + /// ``` + /// + /// The following example will panic because the new value doesn't match. 
+ /// + /// ```should_panic + /// let mut set = hashbrown::HashSet::new(); + /// set.get_or_insert_with("rust", |_| String::new()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_or_insert_with(&mut self, value: &Q, f: F) -> &T + where + Q: Hash + Equivalent + ?Sized, + F: FnOnce(&Q) -> T, + { + let hash = make_hash(&self.map.hash_builder, value); + let bucket = match self.map.find_or_find_insert_slot(hash, value) { + Ok(bucket) => bucket, + Err(slot) => { + let new = f(value); + assert!(value.equivalent(&new), "new value is not equivalent"); + unsafe { self.map.table.insert_in_slot(hash, slot, (new, ())) } + } + }; + unsafe { &bucket.as_ref().0 } + } + + /// Gets the given value's corresponding entry in the set for in-place manipulation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::hash_set::Entry::*; + /// + /// let mut singles = HashSet::new(); + /// let mut dupes = HashSet::new(); + /// + /// for ch in "a short treatise on fungi".chars() { + /// if let Vacant(dupe_entry) = dupes.entry(ch) { + /// // We haven't already seen a duplicate, so + /// // check if we've at least seen it once. + /// match singles.entry(ch) { + /// Vacant(single_entry) => { + /// // We found a new character for the first time. + /// single_entry.insert(); + /// } + /// Occupied(single_entry) => { + /// // We've already seen this once, "move" it to dupes. + /// single_entry.remove(); + /// dupe_entry.insert(); + /// } + /// } + /// } + /// } + /// + /// assert!(!singles.contains(&'t') && dupes.contains(&'t')); + /// assert!(singles.contains(&'u') && !dupes.contains(&'u')); + /// assert!(!singles.contains(&'v') && !dupes.contains(&'v')); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn entry(&mut self, value: T) -> Entry<'_, T, S, A> { + match self.map.entry(value) { + map::Entry::Occupied(entry) => Entry::Occupied(OccupiedEntry { inner: entry }), + map::Entry::Vacant(entry) => Entry::Vacant(VacantEntry { inner: entry }), + } + } + + /// Returns `true` if `self` has no elements in common with `other`. + /// This is equivalent to checking for an empty intersection. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// let mut b = HashSet::new(); + /// + /// assert_eq!(a.is_disjoint(&b), true); + /// b.insert(4); + /// assert_eq!(a.is_disjoint(&b), true); + /// b.insert(1); + /// assert_eq!(a.is_disjoint(&b), false); + /// ``` + pub fn is_disjoint(&self, other: &Self) -> bool { + self.intersection(other).next().is_none() + } + + /// Returns `true` if the set is a subset of another, + /// i.e., `other` contains at least all the values in `self`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let sup: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// let mut set = HashSet::new(); + /// + /// assert_eq!(set.is_subset(&sup), true); + /// set.insert(2); + /// assert_eq!(set.is_subset(&sup), true); + /// set.insert(4); + /// assert_eq!(set.is_subset(&sup), false); + /// ``` + pub fn is_subset(&self, other: &Self) -> bool { + self.len() <= other.len() && self.iter().all(|v| other.contains(v)) + } + + /// Returns `true` if the set is a superset of another, + /// i.e., `self` contains at least all the values in `other`. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let sub: HashSet<_> = [1, 2].into_iter().collect(); + /// let mut set = HashSet::new(); + /// + /// assert_eq!(set.is_superset(&sub), false); + /// + /// set.insert(0); + /// set.insert(1); + /// assert_eq!(set.is_superset(&sub), false); + /// + /// set.insert(2); + /// assert_eq!(set.is_superset(&sub), true); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn is_superset(&self, other: &Self) -> bool { + other.is_subset(self) + } + + /// Adds a value to the set. + /// + /// If the set did not have this value present, `true` is returned. + /// + /// If the set did have this value present, `false` is returned. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::new(); + /// + /// assert_eq!(set.insert(2), true); + /// assert_eq!(set.insert(2), false); + /// assert_eq!(set.len(), 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, value: T) -> bool { + self.map.insert(value, ()).is_none() + } + + /// Insert a value the set without checking if the value already exists in the set. + /// + /// This operation is faster than regular insert, because it does not perform + /// lookup before insertion. + /// + /// This operation is useful during initial population of the set. + /// For example, when constructing a set from another set, we know + /// that values are unique. + /// + /// # Safety + /// + /// This operation is safe if a value does not exist in the set. + /// + /// However, if a value exists in the set already, the behavior is unspecified: + /// this operation may panic, loop forever, or any following operation with the set + /// may panic, loop forever or return arbitrary result. + /// + /// That said, this operation (and following operations) are guaranteed to + /// not violate memory safety. + /// + /// However this operation is still unsafe because the resulting `HashSet` + /// may be passed to unsafe code which does expect the set to behave + /// correctly, and would cause unsoundness as a result. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn insert_unique_unchecked(&mut self, value: T) -> &T { + self.map.insert_unique_unchecked(value, ()).0 + } + + /// Adds a value to the set, replacing the existing value, if any, that is equal to the given + /// one. Returns the replaced value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::new(); + /// set.insert(Vec::::new()); + /// + /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 0); + /// set.replace(Vec::with_capacity(10)); + /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace(&mut self, value: T) -> Option { + let hash = make_hash(&self.map.hash_builder, &value); + match self.map.find_or_find_insert_slot(hash, &value) { + Ok(bucket) => Some(mem::replace(unsafe { &mut bucket.as_mut().0 }, value)), + Err(slot) => { + unsafe { + self.map.table.insert_in_slot(hash, slot, (value, ())); + } + None + } + } + } + + /// Removes a value from the set. Returns whether the value was + /// present in the set. + /// + /// The value may be any borrowed form of the set's value type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the value type. 
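// Sketch of the intended use of `insert_unique_unchecked` documented above:
// bulk-populating a set from values that are already known to be distinct,
// skipping the per-insert lookup. `from_distinct_range` is an illustrative
// name.
use hashbrown::HashSet;

fn from_distinct_range(n: u32) -> HashSet<u32> {
    let mut set = HashSet::with_capacity(n as usize);
    for v in 0..n {
        // SAFETY: the values of a half-open range are pairwise distinct, so
        // `v` cannot already be present in `set`.
        unsafe {
            set.insert_unique_unchecked(v);
        }
    }
    set
}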
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::new(); + /// + /// set.insert(2); + /// assert_eq!(set.remove(&2), true); + /// assert_eq!(set.remove(&2), false); + /// ``` + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(&mut self, value: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.map.remove(value).is_some() + } + + /// Removes and returns the value in the set, if any, that is equal to the given one. + /// + /// The value may be any borrowed form of the set's value type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the value type. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<_> = [1, 2, 3].into_iter().collect(); + /// assert_eq!(set.take(&2), Some(2)); + /// assert_eq!(set.take(&2), None); + /// ``` + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn take(&mut self, value: &Q) -> Option + where + Q: Hash + Equivalent + ?Sized, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.map.remove_entry(value) { + Some((k, _)) => Some(k), + None => None, + } + } + + /// Returns the total amount of memory allocated internally by the hash + /// set, in bytes. + /// + /// The returned number is informational only. It is intended to be + /// primarily used for memory profiling. + #[inline] + pub fn allocation_size(&self) -> usize { + self.map.allocation_size() + } +} + +impl PartialEq for HashSet +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false; + } + + self.iter().all(|key| other.contains(key)) + } +} + +impl Eq for HashSet +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ +} + +impl fmt::Debug for HashSet +where + T: fmt::Debug, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_set().entries(self.iter()).finish() + } +} + +impl From> for HashSet +where + A: Allocator, +{ + fn from(map: HashMap) -> Self { + Self { map } + } +} + +impl FromIterator for HashSet +where + T: Eq + Hash, + S: BuildHasher + Default, + A: Default + Allocator, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn from_iter>(iter: I) -> Self { + let mut set = Self::with_hasher_in(Default::default(), Default::default()); + set.extend(iter); + set + } +} + +// The default hasher is used to match the std implementation signature +#[cfg(feature = "default-hasher")] +impl From<[T; N]> for HashSet +where + T: Eq + Hash, + A: Default + Allocator, +{ + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let set1 = HashSet::from([1, 2, 3, 4]); + /// let set2: HashSet<_> = [1, 2, 3, 4].into(); + /// assert_eq!(set1, set2); + /// ``` + fn from(arr: [T; N]) -> Self { + arr.into_iter().collect() + } +} + +impl Extend for HashSet +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: I) { + self.map.extend(iter.into_iter().map(|k| (k, ()))); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, k: T) { + self.map.insert(k, ()); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, 
additional: usize) { + Extend::<(T, ())>::extend_reserve(&mut self.map, additional); + } +} + +impl<'a, T, S, A> Extend<&'a T> for HashSet +where + T: 'a + Eq + Hash + Copy, + S: BuildHasher, + A: Allocator, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: I) { + self.extend(iter.into_iter().copied()); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, k: &'a T) { + self.map.insert(*k, ()); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + Extend::<(T, ())>::extend_reserve(&mut self.map, additional); + } +} + +impl Default for HashSet +where + S: Default, + A: Default + Allocator, +{ + /// Creates an empty `HashSet` with the `Default` value for the hasher. + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + map: HashMap::default(), + } + } +} + +impl BitOr<&HashSet> for &HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, + A: Allocator + Default, +{ + type Output = HashSet; + + /// Returns the union of `self` and `rhs` as a new `HashSet`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a | &b; + /// + /// let mut i = 0; + /// let expected = [1, 2, 3, 4, 5]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitor(self, rhs: &HashSet) -> HashSet { + self.union(rhs).cloned().collect() + } +} + +impl BitAnd<&HashSet> for &HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, + A: Allocator + Default, +{ + type Output = HashSet; + + /// Returns the intersection of `self` and `rhs` as a new `HashSet`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![2, 3, 4].into_iter().collect(); + /// + /// let set = &a & &b; + /// + /// let mut i = 0; + /// let expected = [2, 3]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitand(self, rhs: &HashSet) -> HashSet { + self.intersection(rhs).cloned().collect() + } +} + +impl BitXor<&HashSet> for &HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, + A: Allocator + Default, +{ + type Output = HashSet; + + /// Returns the symmetric difference of `self` and `rhs` as a new `HashSet`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a ^ &b; + /// + /// let mut i = 0; + /// let expected = [1, 2, 4, 5]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitxor(self, rhs: &HashSet) -> HashSet { + self.symmetric_difference(rhs).cloned().collect() + } +} + +impl Sub<&HashSet> for &HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, + A: Allocator + Default, +{ + type Output = HashSet; + + /// Returns the difference of `self` and `rhs` as a new `HashSet`. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a - &b; + /// + /// let mut i = 0; + /// let expected = [1, 2]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn sub(self, rhs: &HashSet) -> HashSet { + self.difference(rhs).cloned().collect() + } +} + +impl BitOrAssign<&HashSet> for HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher, + A: Allocator, +{ + /// Modifies this set to contain the union of `self` and `rhs`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// a |= &b; + /// + /// let mut i = 0; + /// let expected = [1, 2, 3, 4, 5]; + /// for x in &a { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitor_assign(&mut self, rhs: &HashSet) { + for item in rhs { + if !self.contains(item) { + self.insert(item.clone()); + } + } + } +} + +impl BitAndAssign<&HashSet> for HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher, + A: Allocator, +{ + /// Modifies this set to contain the intersection of `self` and `rhs`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![2, 3, 4].into_iter().collect(); + /// + /// a &= &b; + /// + /// let mut i = 0; + /// let expected = [2, 3]; + /// for x in &a { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitand_assign(&mut self, rhs: &HashSet) { + self.retain(|item| rhs.contains(item)); + } +} + +impl BitXorAssign<&HashSet> for HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher, + A: Allocator, +{ + /// Modifies this set to contain the symmetric difference of `self` and `rhs`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// a ^= &b; + /// + /// let mut i = 0; + /// let expected = [1, 2, 4, 5]; + /// for x in &a { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitxor_assign(&mut self, rhs: &HashSet) { + for item in rhs { + let hash = make_hash(&self.map.hash_builder, item); + match self.map.find_or_find_insert_slot(hash, item) { + Ok(bucket) => unsafe { + self.map.table.remove(bucket); + }, + Err(slot) => unsafe { + self.map + .table + .insert_in_slot(hash, slot, (item.clone(), ())); + }, + } + } + } +} + +impl SubAssign<&HashSet> for HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher, + A: Allocator, +{ + /// Modifies this set to contain the difference of `self` and `rhs`. 
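// Editor's note (illustrative sketch, not part of the vendored patch): the
// borrowed operators (`|`, `&`, `^`, `-`) implemented above allocate a new
// set, while the assigning forms (`|=`, `&=`, `^=`, `-=`) mutate the
// left-hand set in place. Function name is made up; default hasher assumed.
fn set_operator_sketch() {
    use hashbrown::HashSet;

    let a: HashSet<i32> = [1, 2, 3].into();
    let b: HashSet<i32> = [3, 4].into();

    let union: HashSet<i32> = &a | &b;  // {1, 2, 3, 4}
    let only_a: HashSet<i32> = &a - &b; // {1, 2}
    assert_eq!((union.len(), only_a.len()), (4, 2));

    let mut c = a.clone();
    c &= &b;                            // c is now {3}
    assert!(c.len() == 1 && c.contains(&3));
}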
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// a -= &b; + /// + /// let mut i = 0; + /// let expected = [1, 2]; + /// for x in &a { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn sub_assign(&mut self, rhs: &HashSet) { + if rhs.len() < self.len() { + for item in rhs { + self.remove(item); + } + } else { + self.retain(|item| !rhs.contains(item)); + } + } +} + +/// An iterator over the items of a `HashSet`. +/// +/// This `struct` is created by the [`iter`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`iter`]: struct.HashSet.html#method.iter +pub struct Iter<'a, K> { + iter: Keys<'a, K, ()>, +} + +/// An owning iterator over the items of a `HashSet`. +/// +/// This `struct` is created by the [`into_iter`] method on [`HashSet`] +/// (provided by the `IntoIterator` trait). See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`into_iter`]: struct.HashSet.html#method.into_iter +pub struct IntoIter { + iter: map::IntoIter, +} + +/// A draining iterator over the items of a `HashSet`. +/// +/// This `struct` is created by the [`drain`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`drain`]: struct.HashSet.html#method.drain +pub struct Drain<'a, K, A: Allocator = Global> { + iter: map::Drain<'a, K, (), A>, +} + +/// A draining iterator over entries of a `HashSet` which don't satisfy the predicate `f`. +/// +/// This `struct` is created by the [`extract_if`] method on [`HashSet`]. See its +/// documentation for more. +/// +/// [`extract_if`]: struct.HashSet.html#method.extract_if +/// [`HashSet`]: struct.HashSet.html +#[must_use = "Iterators are lazy unless consumed"] +pub struct ExtractIf<'a, K, F, A: Allocator = Global> +where + F: FnMut(&K) -> bool, +{ + f: F, + inner: RawExtractIf<'a, (K, ()), A>, +} + +/// A lazy iterator producing elements in the intersection of `HashSet`s. +/// +/// This `struct` is created by the [`intersection`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`intersection`]: struct.HashSet.html#method.intersection +pub struct Intersection<'a, T, S, A: Allocator = Global> { + // iterator of the first set + iter: Iter<'a, T>, + // the second set + other: &'a HashSet, +} + +/// A lazy iterator producing elements in the difference of `HashSet`s. +/// +/// This `struct` is created by the [`difference`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`difference`]: struct.HashSet.html#method.difference +pub struct Difference<'a, T, S, A: Allocator = Global> { + // iterator of the first set + iter: Iter<'a, T>, + // the second set + other: &'a HashSet, +} + +/// A lazy iterator producing elements in the symmetric difference of `HashSet`s. +/// +/// This `struct` is created by the [`symmetric_difference`] method on +/// [`HashSet`]. See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference +pub struct SymmetricDifference<'a, T, S, A: Allocator = Global> { + iter: Chain, Difference<'a, T, S, A>>, +} + +/// A lazy iterator producing elements in the union of `HashSet`s. 
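// Editor's note (illustrative sketch, not part of the vendored patch): the
// `Intersection`/`Difference`/`SymmetricDifference`/`Union` types declared
// here are lazy borrowing iterators; no work happens until they are driven.
// Function name is made up; default hasher assumed.
fn lazy_set_iter_sketch() {
    use hashbrown::HashSet;

    let a: HashSet<i32> = [1, 2, 3].into();
    let b: HashSet<i32> = [2, 3, 4].into();

    // The iterators yield `&i32`; `copied` turns that into owned values.
    let mut only_in_a: Vec<i32> = a.difference(&b).copied().collect();
    only_in_a.sort_unstable();
    assert_eq!(only_in_a, [1]);

    // `union` visits each distinct element exactly once.
    assert_eq!(a.union(&b).count(), 4);
}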
+/// +/// This `struct` is created by the [`union`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`union`]: struct.HashSet.html#method.union +pub struct Union<'a, T, S, A: Allocator = Global> { + iter: Chain, Difference<'a, T, S, A>>, +} + +impl<'a, T, S, A: Allocator> IntoIterator for &'a HashSet { + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> Iter<'a, T> { + self.iter() + } +} + +impl IntoIterator for HashSet { + type Item = T; + type IntoIter = IntoIter; + + /// Creates a consuming iterator, that is, one that moves each value out + /// of the set in arbitrary order. The set cannot be used after calling + /// this. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let mut set = HashSet::new(); + /// set.insert("a".to_string()); + /// set.insert("b".to_string()); + /// + /// // Not possible to collect to a Vec with a regular `.iter()`. + /// let v: Vec = set.into_iter().collect(); + /// + /// // Will print in an arbitrary order. + /// for x in &v { + /// println!("{}", x); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> IntoIter { + IntoIter { + iter: self.map.into_iter(), + } + } +} + +impl Clone for Iter<'_, K> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Iter { + iter: self.iter.clone(), + } + } +} +impl Default for Iter<'_, K> { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Iter { + iter: Default::default(), + } + } +} +impl<'a, K> Iterator for Iter<'a, K> { + type Item = &'a K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a K> { + self.iter.next() + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } +} +impl ExactSizeIterator for Iter<'_, K> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.iter.len() + } +} +impl FusedIterator for Iter<'_, K> {} + +impl fmt::Debug for Iter<'_, K> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Default for IntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + IntoIter { + iter: Default::default(), + } + } +} +impl Iterator for IntoIter { + type Item = K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.iter.next() { + Some((k, _)) => Some(k), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, |acc, (k, ())| f(acc, k)) + } +} +impl ExactSizeIterator for IntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.iter.len() + } +} +impl FusedIterator for IntoIter {} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let entries_iter = self.iter.iter().map(|(k, _)| k); + f.debug_list().entries(entries_iter).finish() + } +} + +impl Iterator for Drain<'_, K, A> { + type Item = K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + // Avoid `Option::map` because it bloats LLVM IR. + match self.iter.next() { + Some((k, _)) => Some(k), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, |acc, (k, ())| f(acc, k)) + } +} +impl ExactSizeIterator for Drain<'_, K, A> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.iter.len() + } +} +impl FusedIterator for Drain<'_, K, A> {} + +impl fmt::Debug for Drain<'_, K, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let entries_iter = self.iter.iter().map(|(k, _)| k); + f.debug_list().entries(entries_iter).finish() + } +} + +impl Iterator for ExtractIf<'_, K, F, A> +where + F: FnMut(&K) -> bool, +{ + type Item = K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + self.inner + .next(|&mut (ref k, ())| (self.f)(k)) + .map(|(k, ())| k) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (0, self.inner.iter.size_hint().1) + } +} + +impl FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {} + +impl Clone for Intersection<'_, T, S, A> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Intersection { + iter: self.iter.clone(), + ..*self + } + } +} + +impl<'a, T, S, A> Iterator for Intersection<'a, T, S, A> +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a T> { + loop { + let elt = self.iter.next()?; + if self.other.contains(elt) { + return Some(elt); + } + } + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, |acc, elt| { + if self.other.contains(elt) { + f(acc, elt) + } else { + acc + } + }) + } +} + +impl fmt::Debug for Intersection<'_, T, S, A> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl FusedIterator for Intersection<'_, T, S, A> +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ +} + +impl Clone for Difference<'_, T, S, A> { + #[cfg_attr(feature = 
"inline-more", inline)] + fn clone(&self) -> Self { + Difference { + iter: self.iter.clone(), + ..*self + } + } +} + +impl<'a, T, S, A> Iterator for Difference<'a, T, S, A> +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a T> { + loop { + let elt = self.iter.next()?; + if !self.other.contains(elt) { + return Some(elt); + } + } + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + let (lower, upper) = self.iter.size_hint(); + (lower.saturating_sub(self.other.len()), upper) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, |acc, elt| { + if self.other.contains(elt) { + acc + } else { + f(acc, elt) + } + }) + } +} + +impl FusedIterator for Difference<'_, T, S, A> +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ +} + +impl fmt::Debug for Difference<'_, T, S, A> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Clone for SymmetricDifference<'_, T, S, A> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + SymmetricDifference { + iter: self.iter.clone(), + } + } +} + +impl<'a, T, S, A> Iterator for SymmetricDifference<'a, T, S, A> +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a T> { + self.iter.next() + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } +} + +impl FusedIterator for SymmetricDifference<'_, T, S, A> +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ +} + +impl fmt::Debug for SymmetricDifference<'_, T, S, A> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Clone for Union<'_, T, S, A> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Union { + iter: self.iter.clone(), + } + } +} + +impl FusedIterator for Union<'_, T, S, A> +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ +} + +impl fmt::Debug for Union<'_, T, S, A> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl<'a, T, S, A> Iterator for Union<'a, T, S, A> +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator, +{ + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a T> { + self.iter.next() + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold(self, init: B, f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } +} + +/// A view into a single entry in a set, which may either be vacant or occupied. 
+/// +/// This `enum` is constructed from the [`entry`] method on [`HashSet`]. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`entry`]: struct.HashSet.html#method.entry +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_set::{Entry, HashSet, OccupiedEntry}; +/// +/// let mut set = HashSet::new(); +/// set.extend(["a", "b", "c"]); +/// assert_eq!(set.len(), 3); +/// +/// // Existing value (insert) +/// let entry: Entry<_, _> = set.entry("a"); +/// let _raw_o: OccupiedEntry<_, _> = entry.insert(); +/// assert_eq!(set.len(), 3); +/// // Nonexistent value (insert) +/// set.entry("d").insert(); +/// +/// // Existing value (or_insert) +/// set.entry("b").or_insert(); +/// // Nonexistent value (or_insert) +/// set.entry("e").or_insert(); +/// +/// println!("Our HashSet: {:?}", set); +/// +/// let mut vec: Vec<_> = set.iter().copied().collect(); +/// // The `Iter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, ["a", "b", "c", "d", "e"]); +/// ``` +pub enum Entry<'a, T, S, A = Global> +where + A: Allocator, +{ + /// An occupied entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_set::{Entry, HashSet}; + /// let mut set: HashSet<_> = ["a", "b"].into(); + /// + /// match set.entry("a") { + /// Entry::Vacant(_) => unreachable!(), + /// Entry::Occupied(_) => { } + /// } + /// ``` + Occupied(OccupiedEntry<'a, T, S, A>), + + /// A vacant entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_set::{Entry, HashSet}; + /// let mut set: HashSet<&str> = HashSet::new(); + /// + /// match set.entry("a") { + /// Entry::Occupied(_) => unreachable!(), + /// Entry::Vacant(_) => { } + /// } + /// ``` + Vacant(VacantEntry<'a, T, S, A>), +} + +impl fmt::Debug for Entry<'_, T, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), + } + } +} + +/// A view into an occupied entry in a `HashSet`. +/// It is part of the [`Entry`] enum. +/// +/// [`Entry`]: enum.Entry.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_set::{Entry, HashSet, OccupiedEntry}; +/// +/// let mut set = HashSet::new(); +/// set.extend(["a", "b", "c"]); +/// +/// let _entry_o: OccupiedEntry<_, _> = set.entry("a").insert(); +/// assert_eq!(set.len(), 3); +/// +/// // Existing key +/// match set.entry("a") { +/// Entry::Vacant(_) => unreachable!(), +/// Entry::Occupied(view) => { +/// assert_eq!(view.get(), &"a"); +/// } +/// } +/// +/// assert_eq!(set.len(), 3); +/// +/// // Existing key (take) +/// match set.entry("c") { +/// Entry::Vacant(_) => unreachable!(), +/// Entry::Occupied(view) => { +/// assert_eq!(view.remove(), "c"); +/// } +/// } +/// assert_eq!(set.get(&"c"), None); +/// assert_eq!(set.len(), 2); +/// ``` +pub struct OccupiedEntry<'a, T, S, A: Allocator = Global> { + inner: map::OccupiedEntry<'a, T, (), S, A>, +} + +impl fmt::Debug for OccupiedEntry<'_, T, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("value", self.get()) + .finish() + } +} + +/// A view into a vacant entry in a `HashSet`. +/// It is part of the [`Entry`] enum. 
+/// +/// [`Entry`]: enum.Entry.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_set::{Entry, HashSet, VacantEntry}; +/// +/// let mut set = HashSet::<&str>::new(); +/// +/// let entry_v: VacantEntry<_, _> = match set.entry("a") { +/// Entry::Vacant(view) => view, +/// Entry::Occupied(_) => unreachable!(), +/// }; +/// entry_v.insert(); +/// assert!(set.contains("a") && set.len() == 1); +/// +/// // Nonexistent key (insert) +/// match set.entry("b") { +/// Entry::Vacant(view) => { view.insert(); }, +/// Entry::Occupied(_) => unreachable!(), +/// } +/// assert!(set.contains("b") && set.len() == 2); +/// ``` +pub struct VacantEntry<'a, T, S, A: Allocator = Global> { + inner: map::VacantEntry<'a, T, (), S, A>, +} + +impl fmt::Debug for VacantEntry<'_, T, S, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("VacantEntry").field(self.get()).finish() + } +} + +impl<'a, T, S, A: Allocator> Entry<'a, T, S, A> { + /// Sets the value of the entry, and returns an `OccupiedEntry`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<&str> = HashSet::new(); + /// let entry = set.entry("horseyland").insert(); + /// + /// assert_eq!(entry.get(), &"horseyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self) -> OccupiedEntry<'a, T, S, A> + where + T: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => entry.insert(), + } + } + + /// Ensures a value is in the entry by inserting if it was vacant. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<&str> = HashSet::new(); + /// + /// // nonexistent key + /// set.entry("poneyland").or_insert(); + /// assert!(set.contains("poneyland")); + /// + /// // existing key + /// set.entry("poneyland").or_insert(); + /// assert!(set.contains("poneyland")); + /// assert_eq!(set.len(), 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self) + where + T: Hash, + S: BuildHasher, + { + if let Entry::Vacant(entry) = self { + entry.insert(); + } + } + + /// Returns a reference to this entry's value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<&str> = HashSet::new(); + /// set.entry("poneyland").or_insert(); + /// // existing key + /// assert_eq!(set.entry("poneyland").get(), &"poneyland"); + /// // nonexistent key + /// assert_eq!(set.entry("horseland").get(), &"horseland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &T { + match *self { + Entry::Occupied(ref entry) => entry.get(), + Entry::Vacant(ref entry) => entry.get(), + } + } +} + +impl OccupiedEntry<'_, T, S, A> { + /// Gets a reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_set::{Entry, HashSet}; + /// + /// let mut set: HashSet<&str> = HashSet::new(); + /// set.entry("poneyland").or_insert(); + /// + /// match set.entry("poneyland") { + /// Entry::Vacant(_) => panic!(), + /// Entry::Occupied(entry) => assert_eq!(entry.get(), &"poneyland"), + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &T { + self.inner.key() + } + + /// Takes the value out of the entry, and returns it. + /// Keeps the allocated memory for reuse. 
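// Editor's note (illustrative sketch, not part of the vendored patch): the
// set `Entry` API documented above mirrors the map one but carries no value
// payload, so `or_insert` simply adds the looked-up value when it is missing.
// Function name is made up; default hasher assumed.
fn set_entry_sketch() {
    use hashbrown::hash_set::{Entry, HashSet};

    let mut set: HashSet<&str> = HashSet::new();
    set.entry("a").or_insert();             // vacant: inserted
    set.entry("a").or_insert();             // occupied: no change

    match set.entry("b") {
        Entry::Vacant(v) => { v.insert(); } // explicit two-arm form
        Entry::Occupied(_) => unreachable!(),
    }
    assert_eq!(set.len(), 2);
}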
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::hash_set::Entry; + /// + /// let mut set: HashSet<&str> = HashSet::new(); + /// // The set is empty + /// assert!(set.is_empty() && set.capacity() == 0); + /// + /// set.entry("poneyland").or_insert(); + /// let capacity_before_remove = set.capacity(); + /// + /// if let Entry::Occupied(o) = set.entry("poneyland") { + /// assert_eq!(o.remove(), "poneyland"); + /// } + /// + /// assert_eq!(set.contains("poneyland"), false); + /// // Now set hold none elements but capacity is equal to the old one + /// assert!(set.len() == 0 && set.capacity() == capacity_before_remove); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(self) -> T { + self.inner.remove_entry().0 + } +} + +impl<'a, T, S, A: Allocator> VacantEntry<'a, T, S, A> { + /// Gets a reference to the value that would be used when inserting + /// through the `VacantEntry`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<&str> = HashSet::new(); + /// assert_eq!(set.entry("poneyland").get(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &T { + self.inner.key() + } + + /// Take ownership of the value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_set::{Entry, HashSet}; + /// + /// let mut set: HashSet<&str> = HashSet::new(); + /// + /// match set.entry("poneyland") { + /// Entry::Occupied(_) => panic!(), + /// Entry::Vacant(v) => assert_eq!(v.into_value(), "poneyland"), + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_value(self) -> T { + self.inner.into_key() + } + + /// Sets the value of the entry with the `VacantEntry`'s value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::hash_set::Entry; + /// + /// let mut set: HashSet<&str> = HashSet::new(); + /// + /// if let Entry::Vacant(o) = set.entry("poneyland") { + /// o.insert(); + /// } + /// assert!(set.contains("poneyland")); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self) -> OccupiedEntry<'a, T, S, A> + where + T: Hash, + S: BuildHasher, + { + OccupiedEntry { + inner: self.inner.insert_entry(()), + } + } +} + +#[allow(dead_code)] +fn assert_covariance() { + fn set<'new>(v: HashSet<&'static str>) -> HashSet<&'new str> { + v + } + fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { + v + } + fn into_iter<'new, A: Allocator>(v: IntoIter<&'static str, A>) -> IntoIter<&'new str, A> { + v + } + fn difference<'a, 'new, A: Allocator>( + v: Difference<'a, &'static str, DefaultHashBuilder, A>, + ) -> Difference<'a, &'new str, DefaultHashBuilder, A> { + v + } + fn symmetric_difference<'a, 'new, A: Allocator>( + v: SymmetricDifference<'a, &'static str, DefaultHashBuilder, A>, + ) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder, A> { + v + } + fn intersection<'a, 'new, A: Allocator>( + v: Intersection<'a, &'static str, DefaultHashBuilder, A>, + ) -> Intersection<'a, &'new str, DefaultHashBuilder, A> { + v + } + fn union<'a, 'new, A: Allocator>( + v: Union<'a, &'static str, DefaultHashBuilder, A>, + ) -> Union<'a, &'new str, DefaultHashBuilder, A> { + v + } + fn drain<'new, A: Allocator>(d: Drain<'static, &'static str, A>) -> Drain<'new, &'new str, A> { + d + } +} + +#[cfg(test)] +mod test_set { + use super::{make_hash, Equivalent, HashSet}; + use crate::DefaultHashBuilder; + use std::vec::Vec; + + #[test] + fn test_zero_capacities() { + type 
HS = HashSet; + + let s = HS::new(); + assert_eq!(s.capacity(), 0); + + let s = HS::default(); + assert_eq!(s.capacity(), 0); + + let s = HS::with_hasher(DefaultHashBuilder::default()); + assert_eq!(s.capacity(), 0); + + let s = HS::with_capacity(0); + assert_eq!(s.capacity(), 0); + + let s = HS::with_capacity_and_hasher(0, DefaultHashBuilder::default()); + assert_eq!(s.capacity(), 0); + + let mut s = HS::new(); + s.insert(1); + s.insert(2); + s.remove(&1); + s.remove(&2); + s.shrink_to_fit(); + assert_eq!(s.capacity(), 0); + + let mut s = HS::new(); + s.reserve(0); + assert_eq!(s.capacity(), 0); + } + + #[test] + fn test_disjoint() { + let mut xs = HashSet::new(); + let mut ys = HashSet::new(); + assert!(xs.is_disjoint(&ys)); + assert!(ys.is_disjoint(&xs)); + assert!(xs.insert(5)); + assert!(ys.insert(11)); + assert!(xs.is_disjoint(&ys)); + assert!(ys.is_disjoint(&xs)); + assert!(xs.insert(7)); + assert!(xs.insert(19)); + assert!(xs.insert(4)); + assert!(ys.insert(2)); + assert!(ys.insert(-11)); + assert!(xs.is_disjoint(&ys)); + assert!(ys.is_disjoint(&xs)); + assert!(ys.insert(7)); + assert!(!xs.is_disjoint(&ys)); + assert!(!ys.is_disjoint(&xs)); + } + + #[test] + fn test_subset_and_superset() { + let mut a = HashSet::new(); + assert!(a.insert(0)); + assert!(a.insert(5)); + assert!(a.insert(11)); + assert!(a.insert(7)); + + let mut b = HashSet::new(); + assert!(b.insert(0)); + assert!(b.insert(7)); + assert!(b.insert(19)); + assert!(b.insert(250)); + assert!(b.insert(11)); + assert!(b.insert(200)); + + assert!(!a.is_subset(&b)); + assert!(!a.is_superset(&b)); + assert!(!b.is_subset(&a)); + assert!(!b.is_superset(&a)); + + assert!(b.insert(5)); + + assert!(a.is_subset(&b)); + assert!(!a.is_superset(&b)); + assert!(!b.is_subset(&a)); + assert!(b.is_superset(&a)); + } + + #[test] + fn test_iterate() { + let mut a = HashSet::new(); + for i in 0..32 { + assert!(a.insert(i)); + } + let mut observed: u32 = 0; + for k in &a { + observed |= 1 << *k; + } + assert_eq!(observed, 0xFFFF_FFFF); + } + + #[test] + fn test_intersection() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(11)); + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(77)); + assert!(a.insert(103)); + assert!(a.insert(5)); + assert!(a.insert(-5)); + + assert!(b.insert(2)); + assert!(b.insert(11)); + assert!(b.insert(77)); + assert!(b.insert(-9)); + assert!(b.insert(-42)); + assert!(b.insert(5)); + assert!(b.insert(3)); + + let mut i = 0; + let expected = [3, 5, 11, 77]; + for x in a.intersection(&b) { + assert!(expected.contains(x)); + i += 1; + } + assert_eq!(i, expected.len()); + } + + #[test] + fn test_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(3)); + assert!(b.insert(9)); + + let mut i = 0; + let expected = [1, 5, 11]; + for x in a.difference(&b) { + assert!(expected.contains(x)); + i += 1; + } + assert_eq!(i, expected.len()); + } + + #[test] + fn test_symmetric_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(-2)); + assert!(b.insert(3)); + assert!(b.insert(9)); + assert!(b.insert(14)); + assert!(b.insert(22)); + + let mut i = 0; + let expected = [-2, 1, 5, 11, 14, 22]; + for x in a.symmetric_difference(&b) { + 
assert!(expected.contains(x)); + i += 1; + } + assert_eq!(i, expected.len()); + } + + #[test] + fn test_union() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + assert!(a.insert(16)); + assert!(a.insert(19)); + assert!(a.insert(24)); + + assert!(b.insert(-2)); + assert!(b.insert(1)); + assert!(b.insert(5)); + assert!(b.insert(9)); + assert!(b.insert(13)); + assert!(b.insert(19)); + + let mut i = 0; + let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24]; + for x in a.union(&b) { + assert!(expected.contains(x)); + i += 1; + } + assert_eq!(i, expected.len()); + } + + #[test] + fn test_from_map() { + let mut a = crate::HashMap::new(); + a.insert(1, ()); + a.insert(2, ()); + a.insert(3, ()); + a.insert(4, ()); + + let a: HashSet<_> = a.into(); + + assert_eq!(a.len(), 4); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + } + + #[test] + fn test_from_iter() { + let xs = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9]; + + let set: HashSet<_> = xs.iter().copied().collect(); + + for x in &xs { + assert!(set.contains(x)); + } + + assert_eq!(set.iter().len(), xs.len() - 1); + } + + #[test] + fn test_move_iter() { + let hs = { + let mut hs = HashSet::new(); + + hs.insert('a'); + hs.insert('b'); + + hs + }; + + let v = hs.into_iter().collect::>(); + assert!(v == ['a', 'b'] || v == ['b', 'a']); + } + + #[test] + fn test_eq() { + // These constants once happened to expose a bug in insert(). + // I'm keeping them around to prevent a regression. + let mut s1 = HashSet::new(); + + s1.insert(1); + s1.insert(2); + s1.insert(3); + + let mut s2 = HashSet::new(); + + s2.insert(1); + s2.insert(2); + + assert!(s1 != s2); + + s2.insert(3); + + assert_eq!(s1, s2); + } + + #[test] + fn test_show() { + let mut set = HashSet::new(); + let empty = HashSet::::new(); + + set.insert(1); + set.insert(2); + + let set_str = format!("{set:?}"); + + assert!(set_str == "{1, 2}" || set_str == "{2, 1}"); + assert_eq!(format!("{empty:?}"), "{}"); + } + + #[test] + fn test_trivial_drain() { + let mut s = HashSet::::new(); + for _ in s.drain() {} + assert!(s.is_empty()); + drop(s); + + let mut s = HashSet::::new(); + drop(s.drain()); + assert!(s.is_empty()); + } + + #[test] + fn test_drain() { + let mut s: HashSet<_> = (1..100).collect(); + + // try this a bunch of times to make sure we don't screw up internal state. + for _ in 0..20 { + assert_eq!(s.len(), 99); + + { + let mut last_i = 0; + let mut d = s.drain(); + for (i, x) in d.by_ref().take(50).enumerate() { + last_i = i; + assert!(x != 0); + } + assert_eq!(last_i, 49); + } + + if !s.is_empty() { + panic!("s should be empty!"); + } + + // reset to try again. 
+ s.extend(1..100); + } + } + + #[test] + fn test_replace() { + use core::hash; + + #[derive(Debug)] + #[allow(dead_code)] + struct Foo(&'static str, i32); + + impl PartialEq for Foo { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for Foo {} + + impl hash::Hash for Foo { + fn hash(&self, h: &mut H) { + self.0.hash(h); + } + } + + let mut s = HashSet::new(); + assert_eq!(s.replace(Foo("a", 1)), None); + assert_eq!(s.len(), 1); + assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1))); + assert_eq!(s.len(), 1); + + let mut it = s.iter(); + assert_eq!(it.next(), Some(&Foo("a", 2))); + assert_eq!(it.next(), None); + } + + #[test] + #[allow(clippy::needless_borrow)] + fn test_extend_ref() { + let mut a = HashSet::new(); + a.insert(1); + + a.extend([2, 3, 4]); + + assert_eq!(a.len(), 4); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + + let mut b = HashSet::new(); + b.insert(5); + b.insert(6); + + a.extend(&b); + + assert_eq!(a.len(), 6); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + assert!(a.contains(&5)); + assert!(a.contains(&6)); + } + + #[test] + fn test_retain() { + let xs = [1, 2, 3, 4, 5, 6]; + let mut set: HashSet = xs.iter().copied().collect(); + set.retain(|&k| k % 2 == 0); + assert_eq!(set.len(), 3); + assert!(set.contains(&2)); + assert!(set.contains(&4)); + assert!(set.contains(&6)); + } + + #[test] + fn test_extract_if() { + { + let mut set: HashSet = (0..8).collect(); + let drained = set.extract_if(|&k| k % 2 == 0); + let mut out = drained.collect::>(); + out.sort_unstable(); + assert_eq!(vec![0, 2, 4, 6], out); + assert_eq!(set.len(), 4); + } + { + let mut set: HashSet = (0..8).collect(); + set.extract_if(|&k| k % 2 == 0).for_each(drop); + assert_eq!(set.len(), 4, "Removes non-matching items on drop"); + } + } + + #[test] + fn test_const_with_hasher() { + use core::hash::BuildHasher; + use std::collections::hash_map::DefaultHasher; + + #[derive(Clone)] + struct MyHasher; + impl BuildHasher for MyHasher { + type Hasher = DefaultHasher; + + fn build_hasher(&self) -> DefaultHasher { + DefaultHasher::new() + } + } + + const EMPTY_SET: HashSet = HashSet::with_hasher(MyHasher); + + let mut set = EMPTY_SET; + set.insert(19); + assert!(set.contains(&19)); + } + + #[test] + fn rehash_in_place() { + let mut set = HashSet::new(); + + for i in 0..224 { + set.insert(i); + } + + assert_eq!( + set.capacity(), + 224, + "The set must be at or close to capacity to trigger a re hashing" + ); + + for i in 100..1400 { + set.remove(&(i - 100)); + set.insert(i); + } + } + + #[test] + fn collect() { + // At the time of writing, this hits the ZST case in from_base_index + // (and without the `map`, it does not). 
+ let mut _set: HashSet<_> = (0..3).map(|_| ()).collect(); + } + + #[test] + fn test_allocation_info() { + assert_eq!(HashSet::<()>::new().allocation_size(), 0); + assert_eq!(HashSet::::new().allocation_size(), 0); + assert!(HashSet::::with_capacity(1).allocation_size() > core::mem::size_of::()); + } + + #[test] + fn duplicate_insert() { + let mut set = HashSet::new(); + set.insert(1); + set.get_or_insert_with(&1, |_| 1); + set.get_or_insert_with(&1, |_| 1); + assert!([1].iter().eq(set.iter())); + } + + #[test] + #[should_panic] + fn some_invalid_equivalent() { + use core::hash::{Hash, Hasher}; + struct Invalid { + count: u32, + other: u32, + } + + struct InvalidRef { + count: u32, + other: u32, + } + + impl PartialEq for Invalid { + fn eq(&self, other: &Self) -> bool { + self.count == other.count && self.other == other.other + } + } + impl Eq for Invalid {} + + impl Equivalent for InvalidRef { + fn equivalent(&self, key: &Invalid) -> bool { + self.count == key.count && self.other == key.other + } + } + impl Hash for Invalid { + fn hash(&self, state: &mut H) { + self.count.hash(state); + } + } + impl Hash for InvalidRef { + fn hash(&self, state: &mut H) { + self.count.hash(state); + } + } + let mut set: HashSet = HashSet::new(); + let key = InvalidRef { count: 1, other: 1 }; + let value = Invalid { count: 1, other: 2 }; + if make_hash(set.hasher(), &key) == make_hash(set.hasher(), &value) { + set.get_or_insert_with(&key, |_| value); + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/table.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/table.rs new file mode 100644 index 000000000000..7f665b75a158 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/src/table.rs @@ -0,0 +1,2383 @@ +use core::{fmt, iter::FusedIterator, marker::PhantomData}; + +use crate::{ + raw::{ + Allocator, Bucket, Global, InsertSlot, RawDrain, RawExtractIf, RawIntoIter, RawIter, + RawIterHash, RawTable, + }, + TryReserveError, +}; + +/// Low-level hash table with explicit hashing. +/// +/// The primary use case for this type over [`HashMap`] or [`HashSet`] is to +/// support types that do not implement the [`Hash`] and [`Eq`] traits, but +/// instead require additional data not contained in the key itself to compute a +/// hash and compare two elements for equality. +/// +/// Examples of when this can be useful include: +/// - An `IndexMap` implementation where indices into a `Vec` are stored as +/// elements in a `HashTable`. Hashing and comparing the elements +/// requires indexing the associated `Vec` to get the actual value referred to +/// by the index. +/// - Avoiding re-computing a hash when it is already known. +/// - Mutating the key of an element in a way that doesn't affect its hash. +/// +/// To achieve this, `HashTable` methods that search for an element in the table +/// require a hash value and equality function to be explicitly passed in as +/// arguments. The method will then iterate over the elements with the given +/// hash and call the equality function on each of them, until a match is found. +/// +/// In most cases, a `HashTable` will not be exposed directly in an API. It will +/// instead be wrapped in a helper type which handles the work of calculating +/// hash values and comparing elements. +/// +/// Due to its low-level nature, this type provides fewer guarantees than +/// [`HashMap`] and [`HashSet`]. 
Specifically, the API allows you to shoot +/// yourself in the foot by having multiple elements with identical keys in the +/// table. The table itself will still function correctly and lookups will +/// arbitrarily return one of the matching elements. However you should avoid +/// doing this because it changes the runtime of hash table operations from +/// `O(1)` to `O(k)` where `k` is the number of duplicate entries. +/// +/// [`HashMap`]: super::HashMap +/// [`HashSet`]: super::HashSet +/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html +/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html +pub struct HashTable +where + A: Allocator, +{ + pub(crate) raw: RawTable, +} + +impl HashTable { + /// Creates an empty `HashTable`. + /// + /// The hash table is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashTable; + /// let mut table: HashTable<&str> = HashTable::new(); + /// assert_eq!(table.len(), 0); + /// assert_eq!(table.capacity(), 0); + /// ``` + pub const fn new() -> Self { + Self { + raw: RawTable::new(), + } + } + + /// Creates an empty `HashTable` with the specified capacity. + /// + /// The hash table will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash table will not allocate. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashTable; + /// let mut table: HashTable<&str> = HashTable::with_capacity(10); + /// assert_eq!(table.len(), 0); + /// assert!(table.capacity() >= 10); + /// ``` + pub fn with_capacity(capacity: usize) -> Self { + Self { + raw: RawTable::with_capacity(capacity), + } + } +} + +impl HashTable +where + A: Allocator, +{ + /// Creates an empty `HashTable` using the given allocator. + /// + /// The hash table is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use bumpalo::Bump; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let bump = Bump::new(); + /// let mut table = HashTable::new_in(&bump); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// // The created HashTable holds none elements + /// assert_eq!(table.len(), 0); + /// + /// // The created HashTable also doesn't allocate memory + /// assert_eq!(table.capacity(), 0); + /// + /// // Now we insert element inside created HashTable + /// table.insert_unique(hasher(&"One"), "One", hasher); + /// // We can see that the HashTable holds 1 element + /// assert_eq!(table.len(), 1); + /// // And it also allocates some capacity + /// assert!(table.capacity() > 1); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub const fn new_in(alloc: A) -> Self { + Self { + raw: RawTable::new_in(alloc), + } + } + + /// Creates an empty `HashTable` with the specified capacity using the given allocator. + /// + /// The hash table will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash table will not allocate. 
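// Editor's note (illustrative sketch, not part of the vendored patch): unlike
// `HashMap`/`HashSet`, `HashTable` never hashes for you; every insert and
// lookup takes the hash (and, for lookups, an equality closure) explicitly,
// mirroring the doc examples above. Function name is made up.
fn hash_table_sketch() {
    use hashbrown::{DefaultHashBuilder, HashTable};
    use std::hash::BuildHasher;

    let mut table: HashTable<(u32, &str)> = HashTable::new();
    let build = DefaultHashBuilder::default();
    // Hash only the "key" part of the stored tuple.
    let hasher = |val: &(u32, &str)| build.hash_one(val.0);

    table.insert_unique(build.hash_one(1u32), (1, "one"), hasher);
    table.insert_unique(build.hash_one(2u32), (2, "two"), hasher);

    assert_eq!(
        table.find(build.hash_one(1u32), |val| val.0 == 1),
        Some(&(1, "one"))
    );
}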
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use bumpalo::Bump; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let bump = Bump::new(); + /// let mut table = HashTable::with_capacity_in(5, &bump); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// // The created HashTable holds none elements + /// assert_eq!(table.len(), 0); + /// // But it can hold at least 5 elements without reallocating + /// let empty_map_capacity = table.capacity(); + /// assert!(empty_map_capacity >= 5); + /// + /// // Now we insert some 5 elements inside created HashTable + /// table.insert_unique(hasher(&"One"), "One", hasher); + /// table.insert_unique(hasher(&"Two"), "Two", hasher); + /// table.insert_unique(hasher(&"Three"), "Three", hasher); + /// table.insert_unique(hasher(&"Four"), "Four", hasher); + /// table.insert_unique(hasher(&"Five"), "Five", hasher); + /// + /// // We can see that the HashTable holds 5 elements + /// assert_eq!(table.len(), 5); + /// // But its capacity isn't changed + /// assert_eq!(table.capacity(), empty_map_capacity) + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Self { + raw: RawTable::with_capacity_in(capacity, alloc), + } + } + + /// Returns a reference to the underlying allocator. + pub fn allocator(&self) -> &A { + self.raw.allocator() + } + + /// Returns a reference to an entry in the table with the given hash and + /// which satisfies the equality function passed. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// table.insert_unique(hasher(&3), 3, hasher); + /// assert_eq!(table.find(hasher(&2), |&val| val == 2), Some(&2)); + /// assert_eq!(table.find(hasher(&4), |&val| val == 4), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn find(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + self.raw.get(hash, eq) + } + + /// Returns a mutable reference to an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// When mutating an entry, you should ensure that it still retains the same + /// hash value as when it was inserted, otherwise lookups of that entry may + /// fail to find it. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Some(val) = table.find_mut(hasher(&1), |val| val.0 == 1) { + /// val.1 = "b"; + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), Some(&(1, "b"))); + /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn find_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { + self.raw.get_mut(hash, eq) + } + + /// Returns an `OccupiedEntry` for an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This can be used to remove the entry from the table. Call + /// [`HashTable::entry`] instead if you wish to insert an entry if the + /// lookup fails. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Ok(entry) = table.find_entry(hasher(&1), |val| val.0 == 1) { + /// entry.remove(); + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn find_entry( + &mut self, + hash: u64, + eq: impl FnMut(&T) -> bool, + ) -> Result, AbsentEntry<'_, T, A>> { + match self.raw.find(hash, eq) { + Some(bucket) => Ok(OccupiedEntry { + hash, + bucket, + table: self, + }), + None => Err(AbsentEntry { table: self }), + } + } + + /// Returns an `Entry` for an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This can be used to remove the entry from the table, or insert a new + /// entry with the given hash if one doesn't already exist. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// This method may grow the table in preparation for an insertion. Call + /// [`HashTable::find_entry`] if this is undesirable. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. 
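// Editor's note (illustrative sketch, not part of the vendored patch): `entry`
// either finds a matching slot or reserves one, so a single lookup can drive
// an insert-or-remove decision, as in the `find_entry`/`entry` examples above.
// Function name is made up.
fn table_toggle_sketch() {
    use hashbrown::hash_table::Entry;
    use hashbrown::{DefaultHashBuilder, HashTable};
    use std::hash::BuildHasher;

    let mut table: HashTable<&str> = HashTable::new();
    let build = DefaultHashBuilder::default();
    let hasher = |val: &&str| build.hash_one(val);

    // Toggle membership: remove when present, insert when absent.
    for word in ["a", "b", "a"] {
        match table.entry(build.hash_one(word), |val| *val == word, hasher) {
            Entry::Occupied(entry) => { entry.remove(); }
            Entry::Vacant(entry) => { entry.insert(word); }
        }
    }
    assert_eq!(table.len(), 1); // "a" was toggled twice, only "b" remains
}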
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::hash_table::Entry; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Entry::Occupied(entry) = table.entry(hasher(&1), |val| val.0 == 1, |val| hasher(&val.0)) + /// { + /// entry.remove(); + /// } + /// if let Entry::Vacant(entry) = table.entry(hasher(&2), |val| val.0 == 2, |val| hasher(&val.0)) { + /// entry.insert((2, "b")); + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None); + /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), Some(&(2, "b"))); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn entry( + &mut self, + hash: u64, + eq: impl FnMut(&T) -> bool, + hasher: impl Fn(&T) -> u64, + ) -> Entry<'_, T, A> { + match self.raw.find_or_find_insert_slot(hash, eq, hasher) { + Ok(bucket) => Entry::Occupied(OccupiedEntry { + hash, + bucket, + table: self, + }), + Err(insert_slot) => Entry::Vacant(VacantEntry { + hash, + insert_slot, + table: self, + }), + } + } + + /// Inserts an element into the `HashTable` with the given hash value, but + /// without checking whether an equivalent element already exists within the + /// table. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut v = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// v.insert_unique(hasher(&1), 1, hasher); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn insert_unique( + &mut self, + hash: u64, + value: T, + hasher: impl Fn(&T) -> u64, + ) -> OccupiedEntry<'_, T, A> { + let bucket = self.raw.insert(hash, value, hasher); + OccupiedEntry { + hash, + bucket, + table: self, + } + } + + /// Clears the table, removing all values. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut v = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// v.insert_unique(hasher(&1), 1, hasher); + /// v.clear(); + /// assert!(v.is_empty()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn clear(&mut self) { + self.raw.clear(); + } + + /// Shrinks the capacity of the table as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::with_capacity(100); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// assert!(table.capacity() >= 100); + /// table.shrink_to_fit(hasher); + /// assert!(table.capacity() >= 2); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn shrink_to_fit(&mut self, hasher: impl Fn(&T) -> u64) { + self.raw.shrink_to(self.len(), hasher) + } + + /// Shrinks the capacity of the table with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::with_capacity(100); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// assert!(table.capacity() >= 100); + /// table.shrink_to(10, hasher); + /// assert!(table.capacity() >= 10); + /// table.shrink_to(0, hasher); + /// assert!(table.capacity() >= 2); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn shrink_to(&mut self, min_capacity: usize, hasher: impl Fn(&T) -> u64) { + self.raw.shrink_to(min_capacity, hasher); + } + + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the `HashTable`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`try_reserve`](HashTable::try_reserve) instead + /// if you want to handle memory allocation failure. 
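// Editor's note (illustrative sketch, not part of the vendored patch):
// capacity changes also take the hasher closure, because growing or shrinking
// rehashes every live element into a new allocation. Function name is made up.
fn table_capacity_sketch() {
    use hashbrown::{DefaultHashBuilder, HashTable};
    use std::hash::BuildHasher;

    let mut table: HashTable<u64> = HashTable::new();
    let build = DefaultHashBuilder::default();
    let hasher = |val: &u64| build.hash_one(*val);

    // Fallible reservation; `reserve` would abort on allocation failure.
    table.try_reserve(100, hasher).expect("allocation failed");
    assert!(table.capacity() >= 100);

    table.insert_unique(build.hash_one(7u64), 7, hasher);
    table.shrink_to_fit(hasher); // keeps room for the one live element
    assert!(table.capacity() >= 1);
}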
+ /// + /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.reserve(10, hasher); + /// assert!(table.capacity() >= 10); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { + self.raw.reserve(additional, hasher) + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashTable`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table + /// .try_reserve(10, hasher) + /// .expect("why is the test harness OOMing on 10 bytes?"); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn try_reserve( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + ) -> Result<(), TryReserveError> { + self.raw.try_reserve(additional, hasher) + } + + /// Returns the number of elements the table can hold without reallocating. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashTable; + /// let table: HashTable = HashTable::with_capacity(100); + /// assert!(table.capacity() >= 100); + /// ``` + pub fn capacity(&self) -> usize { + self.raw.capacity() + } + + /// Returns the number of elements in the table. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// let mut v = HashTable::new(); + /// assert_eq!(v.len(), 0); + /// v.insert_unique(hasher(&1), 1, hasher); + /// assert_eq!(v.len(), 1); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn len(&self) -> usize { + self.raw.len() + } + + /// Returns `true` if the set contains no elements. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// let mut v = HashTable::new(); + /// assert!(v.is_empty()); + /// v.insert_unique(hasher(&1), 1, hasher); + /// assert!(!v.is_empty()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn is_empty(&self) -> bool { + self.raw.is_empty() + } + + /// An iterator visiting all elements in arbitrary order. + /// The iterator element type is `&'a T`. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&"a"), "b", hasher); + /// table.insert_unique(hasher(&"b"), "b", hasher); + /// + /// // Will print in an arbitrary order. + /// for x in table.iter() { + /// println!("{}", x); + /// } + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn iter(&self) -> Iter<'_, T> { + Iter { + inner: unsafe { self.raw.iter() }, + marker: PhantomData, + } + } + + /// An iterator visiting all elements in arbitrary order, + /// with mutable references to the elements. + /// The iterator element type is `&'a mut T`. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// table.insert_unique(hasher(&3), 3, hasher); + /// + /// // Update all values + /// for val in table.iter_mut() { + /// *val *= 2; + /// } + /// + /// assert_eq!(table.len(), 3); + /// let mut vec: Vec = Vec::new(); + /// + /// for val in &table { + /// println!("val: {}", val); + /// vec.push(*val); + /// } + /// + /// // The `Iter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [2, 4, 6]); + /// + /// assert_eq!(table.len(), 3); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn iter_mut(&mut self) -> IterMut<'_, T> { + IterMut { + inner: unsafe { self.raw.iter() }, + marker: PhantomData, + } + } + + /// An iterator visiting all elements which may match a hash. + /// The iterator element type is `&'a T`. + /// + /// This iterator may return elements from the table that have a hash value + /// different than the one provided. You should always validate the returned + /// values before using them. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&"a"), "a", hasher); + /// table.insert_unique(hasher(&"a"), "b", hasher); + /// table.insert_unique(hasher(&"b"), "c", hasher); + /// + /// // Will print "a" and "b" (and possibly "c") in an arbitrary order. + /// for x in table.iter_hash(hasher(&"a")) { + /// println!("{}", x); + /// } + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn iter_hash(&self, hash: u64) -> IterHash<'_, T> { + IterHash { + inner: unsafe { self.raw.iter_hash(hash) }, + marker: PhantomData, + } + } + + /// A mutable iterator visiting all elements which may match a hash. + /// The iterator element type is `&'a mut T`. + /// + /// This iterator may return elements from the table that have a hash value + /// different than the one provided. You should always validate the returned + /// values before using them. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 2, hasher); + /// table.insert_unique(hasher(&1), 3, hasher); + /// table.insert_unique(hasher(&2), 5, hasher); + /// + /// // Update matching values + /// for val in table.iter_hash_mut(hasher(&1)) { + /// *val *= 2; + /// } + /// + /// assert_eq!(table.len(), 3); + /// let mut vec: Vec = Vec::new(); + /// + /// for val in &table { + /// println!("val: {}", val); + /// vec.push(*val); + /// } + /// + /// // The values will contain 4 and 6 and may contain either 5 or 10. + /// assert!(vec.contains(&4)); + /// assert!(vec.contains(&6)); + /// + /// assert_eq!(table.len(), 3); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn iter_hash_mut(&mut self, hash: u64) -> IterHashMut<'_, T> { + IterHashMut { + inner: unsafe { self.raw.iter_hash(hash) }, + marker: PhantomData, + } + } + + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all elements `e` such that `f(&e)` returns `false`. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for x in 1..=6 { + /// table.insert_unique(hasher(&x), x, hasher); + /// } + /// table.retain(|&mut x| x % 2 == 0); + /// assert_eq!(table.len(), 3); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn retain(&mut self, mut f: impl FnMut(&mut T) -> bool) { + // Here we only use `iter` as a temporary, preventing use-after-free + unsafe { + for item in self.raw.iter() { + if !f(item.as_mut()) { + self.raw.erase(item); + } + } + } + } + + /// Clears the set, returning all elements in an iterator. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for x in 1..=3 { + /// table.insert_unique(hasher(&x), x, hasher); + /// } + /// assert!(!table.is_empty()); + /// + /// // print 1, 2, 3 in an arbitrary order + /// for i in table.drain() { + /// println!("{}", i); + /// } + /// + /// assert!(table.is_empty()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn drain(&mut self) -> Drain<'_, T, A> { + Drain { + inner: self.raw.drain(), + } + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// In other words, move all elements `e` such that `f(&e)` returns `true` out + /// into another iterator. + /// + /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating + /// or the iteration short-circuits, then the remaining elements will be retained. + /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. + /// + /// [`retain()`]: HashTable::retain + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for x in 0..8 { + /// table.insert_unique(hasher(&x), x, hasher); + /// } + /// let drained: Vec = table.extract_if(|&mut v| v % 2 == 0).collect(); + /// + /// let mut evens = drained.into_iter().collect::>(); + /// let mut odds = table.into_iter().collect::>(); + /// evens.sort(); + /// odds.sort(); + /// + /// assert_eq!(evens, vec![0, 2, 4, 6]); + /// assert_eq!(odds, vec![1, 3, 5, 7]); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, T, F, A> + where + F: FnMut(&mut T) -> bool, + { + ExtractIf { + f, + inner: RawExtractIf { + iter: unsafe { self.raw.iter() }, + table: &mut self.raw, + }, + } + } + + /// Attempts to get mutable references to `N` values in the map at once. + /// + /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to + /// the `i`th key to be looked up. + /// + /// Returns an array of length `N` with the results of each query. For soundness, at most one + /// mutable reference will be returned to any value. `None` will be used if the key is missing. + /// + /// # Panics + /// + /// Panics if any keys are overlapping. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::hash_table::Entry; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut libraries: HashTable<(&str, u32)> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for (k, v) in [ + /// ("Bodleian Library", 1602), + /// ("Athenæum", 1807), + /// ("Herzogin-Anna-Amalia-Bibliothek", 1691), + /// ("Library of Congress", 1800), + /// ] { + /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k)); + /// } + /// + /// let keys = ["Athenæum", "Library of Congress"]; + /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); + /// assert_eq!( + /// got, + /// [Some(&mut ("Athenæum", 1807)), Some(&mut ("Library of Congress", 1800))], + /// ); + /// + /// // Missing keys result in None + /// let keys = ["Athenæum", "New York Public Library"]; + /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); + /// assert_eq!(got, [Some(&mut ("Athenæum", 1807)), None]); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + /// + /// ```should_panic + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// # use hashbrown::{HashTable, DefaultHashBuilder}; + /// # use std::hash::BuildHasher; + /// + /// let mut libraries: HashTable<(&str, u32)> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for (k, v) in [ + /// ("Athenæum", 1807), + /// ("Library of Congress", 1800), + /// ] { + /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k)); + /// } + /// + /// // Duplicate keys result in a panic! + /// let keys = ["Athenæum", "Athenæum"]; + /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test(); + /// # #[cfg(not(feature = "nightly"))] + /// # panic!(); + /// # } + /// ``` + pub fn get_many_mut( + &mut self, + hashes: [u64; N], + eq: impl FnMut(usize, &T) -> bool, + ) -> [Option<&'_ mut T>; N] { + self.raw.get_many_mut(hashes, eq) + } + + /// Attempts to get mutable references to `N` values in the map at once, without validating that + /// the values are unique. + /// + /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to + /// the `i`th key to be looked up. + /// + /// Returns an array of length `N` with the results of each query. `None` will be returned if + /// any of the keys are missing. + /// + /// For a safe alternative see [`get_many_mut`](`HashTable::get_many_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. 
+    ///
+    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #[cfg(feature = "nightly")]
+    /// # fn test() {
+    /// use hashbrown::hash_table::Entry;
+    /// use hashbrown::{HashTable, DefaultHashBuilder};
+    /// use std::hash::BuildHasher;
+    ///
+    /// let mut libraries: HashTable<(&str, u32)> = HashTable::new();
+    /// let hasher = DefaultHashBuilder::default();
+    /// let hasher = |val: &_| hasher.hash_one(val);
+    /// for (k, v) in [
+    ///     ("Bodleian Library", 1602),
+    ///     ("Athenæum", 1807),
+    ///     ("Herzogin-Anna-Amalia-Bibliothek", 1691),
+    ///     ("Library of Congress", 1800),
+    /// ] {
+    ///     libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k));
+    /// }
+    ///
+    /// let keys = ["Athenæum", "Library of Congress"];
+    /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+    /// assert_eq!(
+    ///     got,
+    ///     [Some(&mut ("Athenæum", 1807)), Some(&mut ("Library of Congress", 1800))],
+    /// );
+    ///
+    /// // Missing keys result in None
+    /// let keys = ["Athenæum", "New York Public Library"];
+    /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+    /// assert_eq!(got, [Some(&mut ("Athenæum", 1807)), None]);
+    /// # }
+    /// # fn main() {
+    /// #     #[cfg(feature = "nightly")]
+    /// #     test()
+    /// # }
+    /// ```
+    pub unsafe fn get_many_unchecked_mut<const N: usize>(
+        &mut self,
+        hashes: [u64; N],
+        eq: impl FnMut(usize, &T) -> bool,
+    ) -> [Option<&'_ mut T>; N] {
+        self.raw.get_many_unchecked_mut(hashes, eq)
+    }
+
+    /// Returns the total amount of memory allocated internally by the hash
+    /// table, in bytes.
+    ///
+    /// The returned number is informational only. It is intended to be
+    /// primarily used for memory profiling.
+    #[inline]
+    pub fn allocation_size(&self) -> usize {
+        self.raw.allocation_size()
+    }
+}
+
+impl<T, A> IntoIterator for HashTable<T, A>
+where
+    A: Allocator,
+{
+    type Item = T;
+    type IntoIter = IntoIter<T, A>;
+
+    fn into_iter(self) -> IntoIter<T, A> {
+        IntoIter {
+            inner: self.raw.into_iter(),
+        }
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a HashTable<T, A>
+where
+    A: Allocator,
+{
+    type Item = &'a T;
+    type IntoIter = Iter<'a, T>;
+
+    fn into_iter(self) -> Iter<'a, T> {
+        self.iter()
+    }
+}
+
+impl<'a, T, A> IntoIterator for &'a mut HashTable<T, A>
+where
+    A: Allocator,
+{
+    type Item = &'a mut T;
+    type IntoIter = IterMut<'a, T>;
+
+    fn into_iter(self) -> IterMut<'a, T> {
+        self.iter_mut()
+    }
+}
+
+impl<T, A> Default for HashTable<T, A>
+where
+    A: Allocator + Default,
+{
+    fn default() -> Self {
+        Self {
+            raw: Default::default(),
+        }
+    }
+}
+
+impl<T, A> Clone for HashTable<T, A>
+where
+    T: Clone,
+    A: Allocator + Clone,
+{
+    fn clone(&self) -> Self {
+        Self {
+            raw: self.raw.clone(),
+        }
+    }
+}
+
+impl<T, A> fmt::Debug for HashTable<T, A>
+where
+    T: fmt::Debug,
+    A: Allocator,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_set().entries(self.iter()).finish()
+    }
+}
+
+/// A view into a single entry in a table, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`HashTable`].
+/// +/// [`HashTable`]: struct.HashTable.html +/// [`entry`]: struct.HashTable.html#method.entry +/// +/// # Examples +/// +/// ``` +/// # #[cfg(feature = "nightly")] +/// # fn test() { +/// use hashbrown::hash_table::{Entry, OccupiedEntry}; +/// use hashbrown::{HashTable, DefaultHashBuilder}; +/// use std::hash::BuildHasher; +/// +/// let mut table = HashTable::new(); +/// let hasher = DefaultHashBuilder::default(); +/// let hasher = |val: &_| hasher.hash_one(val); +/// for x in ["a", "b", "c"] { +/// table.insert_unique(hasher(&x), x, hasher); +/// } +/// assert_eq!(table.len(), 3); +/// +/// // Existing value (insert) +/// let entry: Entry<_> = table.entry(hasher(&"a"), |&x| x == "a", hasher); +/// let _raw_o: OccupiedEntry<_, _> = entry.insert("a"); +/// assert_eq!(table.len(), 3); +/// // Nonexistent value (insert) +/// table.entry(hasher(&"d"), |&x| x == "d", hasher).insert("d"); +/// +/// // Existing value (or_insert) +/// table +/// .entry(hasher(&"b"), |&x| x == "b", hasher) +/// .or_insert("b"); +/// // Nonexistent value (or_insert) +/// table +/// .entry(hasher(&"e"), |&x| x == "e", hasher) +/// .or_insert("e"); +/// +/// println!("Our HashTable: {:?}", table); +/// +/// let mut vec: Vec<_> = table.iter().copied().collect(); +/// // The `Iter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, ["a", "b", "c", "d", "e"]); +/// # } +/// # fn main() { +/// # #[cfg(feature = "nightly")] +/// # test() +/// # } +/// ``` +pub enum Entry<'a, T, A = Global> +where + A: Allocator, +{ + /// An occupied entry. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::hash_table::{Entry, OccupiedEntry}; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for x in ["a", "b"] { + /// table.insert_unique(hasher(&x), x, hasher); + /// } + /// + /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) { + /// Entry::Vacant(_) => unreachable!(), + /// Entry::Occupied(_) => {} + /// } + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + Occupied(OccupiedEntry<'a, T, A>), + + /// A vacant entry. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::hash_table::{Entry, OccupiedEntry}; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table = HashTable::<&str>::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) { + /// Entry::Vacant(_) => {} + /// Entry::Occupied(_) => unreachable!(), + /// } + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + Vacant(VacantEntry<'a, T, A>), +} + +impl fmt::Debug for Entry<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), + } + } +} + +impl<'a, T, A> Entry<'a, T, A> +where + A: Allocator, +{ + /// Sets the value of the entry, replacing any existing value if there is + /// one, and returns an [`OccupiedEntry`]. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable<&str> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// let entry = table + /// .entry(hasher(&"horseyland"), |&x| x == "horseyland", hasher) + /// .insert("horseyland"); + /// + /// assert_eq!(entry.get(), &"horseyland"); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> { + match self { + Entry::Occupied(mut entry) => { + *entry.get_mut() = value; + entry + } + Entry::Vacant(entry) => entry.insert(value), + } + } + + /// Ensures a value is in the entry by inserting if it was vacant. + /// + /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable<&str> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// // nonexistent key + /// table + /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) + /// .or_insert("poneyland"); + /// assert!(table + /// .find(hasher(&"poneyland"), |&x| x == "poneyland") + /// .is_some()); + /// + /// // existing key + /// table + /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) + /// .or_insert("poneyland"); + /// assert!(table + /// .find(hasher(&"poneyland"), |&x| x == "poneyland") + /// .is_some()); + /// assert_eq!(table.len(), 1); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn or_insert(self, default: T) -> OccupiedEntry<'a, T, A> { + match self { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty.. + /// + /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// table + /// .entry(hasher("poneyland"), |x| x == "poneyland", |val| hasher(val)) + /// .or_insert_with(|| "poneyland".to_string()); + /// + /// assert!(table + /// .find(hasher(&"poneyland"), |x| x == "poneyland") + /// .is_some()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn or_insert_with(self, default: impl FnOnce() -> T) -> OccupiedEntry<'a, T, A> { + match self { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => entry.insert(default()), + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the table. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable<(&str, u32)> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// table + /// .entry( + /// hasher(&"poneyland"), + /// |&(x, _)| x == "poneyland", + /// |(k, _)| hasher(&k), + /// ) + /// .and_modify(|(_, v)| *v += 1) + /// .or_insert(("poneyland", 42)); + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"), + /// Some(&("poneyland", 42)) + /// ); + /// + /// table + /// .entry( + /// hasher(&"poneyland"), + /// |&(x, _)| x == "poneyland", + /// |(k, _)| hasher(&k), + /// ) + /// .and_modify(|(_, v)| *v += 1) + /// .or_insert(("poneyland", 42)); + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"), + /// Some(&("poneyland", 43)) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn and_modify(self, f: impl FnOnce(&mut T)) -> Self { + match self { + Entry::Occupied(mut entry) => { + f(entry.get_mut()); + Entry::Occupied(entry) + } + Entry::Vacant(entry) => Entry::Vacant(entry), + } + } +} + +/// A view into an occupied entry in a `HashTable`. +/// It is part of the [`Entry`] enum. 
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use hashbrown::hash_table::{Entry, OccupiedEntry};
+/// use hashbrown::{HashTable, DefaultHashBuilder};
+/// use std::hash::BuildHasher;
+///
+/// let mut table = HashTable::new();
+/// let hasher = DefaultHashBuilder::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in ["a", "b", "c"] {
+///     table.insert_unique(hasher(&x), x, hasher);
+/// }
+/// assert_eq!(table.len(), 3);
+///
+/// let _entry_o: OccupiedEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap();
+/// assert_eq!(table.len(), 3);
+///
+/// // Existing key
+/// match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+///     Entry::Vacant(_) => unreachable!(),
+///     Entry::Occupied(view) => {
+///         assert_eq!(view.get(), &"a");
+///     }
+/// }
+///
+/// assert_eq!(table.len(), 3);
+///
+/// // Existing key (take)
+/// match table.entry(hasher(&"c"), |&x| x == "c", hasher) {
+///     Entry::Vacant(_) => unreachable!(),
+///     Entry::Occupied(view) => {
+///         assert_eq!(view.remove().0, "c");
+///     }
+/// }
+/// assert_eq!(table.find(hasher(&"c"), |&x| x == "c"), None);
+/// assert_eq!(table.len(), 2);
+/// # }
+/// # fn main() {
+/// #     #[cfg(feature = "nightly")]
+/// #     test()
+/// # }
+/// ```
+pub struct OccupiedEntry<'a, T, A = Global>
+where
+    A: Allocator,
+{
+    hash: u64,
+    bucket: Bucket<T>,
+    table: &'a mut HashTable<T, A>,
+}
+
+unsafe impl<T, A> Send for OccupiedEntry<'_, T, A>
+where
+    T: Send,
+    A: Send + Allocator,
+{
+}
+unsafe impl<T, A> Sync for OccupiedEntry<'_, T, A>
+where
+    T: Sync,
+    A: Sync + Allocator,
+{
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for OccupiedEntry<'_, T, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("OccupiedEntry")
+            .field("value", self.get())
+            .finish()
+    }
+}
+
+impl<'a, T, A> OccupiedEntry<'a, T, A>
+where
+    A: Allocator,
+{
+    /// Takes the value out of the entry, and returns it along with a
+    /// `VacantEntry` that can be used to insert another value with the same
+    /// hash as the one that was just removed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #[cfg(feature = "nightly")]
+    /// # fn test() {
+    /// use hashbrown::hash_table::Entry;
+    /// use hashbrown::{HashTable, DefaultHashBuilder};
+    /// use std::hash::BuildHasher;
+    ///
+    /// let mut table: HashTable<&str> = HashTable::new();
+    /// let hasher = DefaultHashBuilder::default();
+    /// let hasher = |val: &_| hasher.hash_one(val);
+    /// // The table is empty
+    /// assert!(table.is_empty() && table.capacity() == 0);
+    ///
+    /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher);
+    /// let capacity_before_remove = table.capacity();
+    ///
+    /// if let Entry::Occupied(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+    ///     assert_eq!(o.remove().0, "poneyland");
+    /// }
+    ///
+    /// assert!(table
+    ///     .find(hasher(&"poneyland"), |&x| x == "poneyland")
+    ///     .is_none());
+    /// // Now the table holds no elements, but its capacity equals the old one
+    /// assert!(table.len() == 0 && table.capacity() == capacity_before_remove);
+    /// # }
+    /// # fn main() {
+    /// #     #[cfg(feature = "nightly")]
+    /// #     test()
+    /// # }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove(self) -> (T, VacantEntry<'a, T, A>) {
+        let (val, slot) = unsafe { self.table.raw.remove(self.bucket) };
+        (
+            val,
+            VacantEntry {
+                hash: self.hash,
+                insert_slot: slot,
+                table: self.table,
+            },
+        )
+    }
+
+    /// Gets a reference to the value in the entry.
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::hash_table::Entry; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable<&str> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher); + /// + /// match table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) { + /// Entry::Vacant(_) => panic!(), + /// Entry::Occupied(entry) => assert_eq!(entry.get(), &"poneyland"), + /// } + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + #[inline] + pub fn get(&self) -> &T { + unsafe { self.bucket.as_ref() } + } + + /// Gets a mutable reference to the value in the entry. + /// + /// If you need a reference to the `OccupiedEntry` which may outlive the + /// destruction of the `Entry` value, see [`into_mut`]. + /// + /// [`into_mut`]: #method.into_mut + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::hash_table::Entry; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable<(&str, u32)> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k)); + /// + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), + /// Some(&("poneyland", 12)) + /// ); + /// + /// if let Entry::Occupied(mut o) = table.entry( + /// hasher(&"poneyland"), + /// |&(x, _)| x == "poneyland", + /// |(k, _)| hasher(&k), + /// ) { + /// o.get_mut().1 += 10; + /// assert_eq!(o.get().1, 22); + /// + /// // We can use the same Entry multiple times. + /// o.get_mut().1 += 2; + /// } + /// + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), + /// Some(&("poneyland", 24)) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + #[inline] + pub fn get_mut(&mut self) -> &mut T { + unsafe { self.bucket.as_mut() } + } + + /// Converts the `OccupiedEntry` into a mutable reference to the value in the entry + /// with a lifetime bound to the table itself. + /// + /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`]. 
+ /// + /// [`get_mut`]: #method.get_mut + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::hash_table::Entry; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable<(&str, u32)> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k)); + /// + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), + /// Some(&("poneyland", 12)) + /// ); + /// + /// let value: &mut (&str, u32); + /// match table.entry( + /// hasher(&"poneyland"), + /// |&(x, _)| x == "poneyland", + /// |(k, _)| hasher(&k), + /// ) { + /// Entry::Occupied(entry) => value = entry.into_mut(), + /// Entry::Vacant(_) => panic!(), + /// } + /// value.1 += 10; + /// + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), + /// Some(&("poneyland", 22)) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn into_mut(self) -> &'a mut T { + unsafe { self.bucket.as_mut() } + } + + /// Converts the `OccupiedEntry` into a mutable reference to the underlying + /// table. + pub fn into_table(self) -> &'a mut HashTable { + self.table + } +} + +/// A view into a vacant entry in a `HashTable`. +/// It is part of the [`Entry`] enum. +/// +/// [`Entry`]: enum.Entry.html +/// +/// # Examples +/// +/// ``` +/// # #[cfg(feature = "nightly")] +/// # fn test() { +/// use hashbrown::hash_table::{Entry, VacantEntry}; +/// use hashbrown::{HashTable, DefaultHashBuilder}; +/// use std::hash::BuildHasher; +/// +/// let mut table: HashTable<&str> = HashTable::new(); +/// let hasher = DefaultHashBuilder::default(); +/// let hasher = |val: &_| hasher.hash_one(val); +/// +/// let entry_v: VacantEntry<_, _> = match table.entry(hasher(&"a"), |&x| x == "a", hasher) { +/// Entry::Vacant(view) => view, +/// Entry::Occupied(_) => unreachable!(), +/// }; +/// entry_v.insert("a"); +/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1); +/// +/// // Nonexistent key (insert) +/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) { +/// Entry::Vacant(view) => { +/// view.insert("b"); +/// } +/// Entry::Occupied(_) => unreachable!(), +/// } +/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2); +/// # } +/// # fn main() { +/// # #[cfg(feature = "nightly")] +/// # test() +/// # } +/// ``` +pub struct VacantEntry<'a, T, A = Global> +where + A: Allocator, +{ + hash: u64, + insert_slot: InsertSlot, + table: &'a mut HashTable, +} + +impl fmt::Debug for VacantEntry<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("VacantEntry") + } +} + +impl<'a, T, A> VacantEntry<'a, T, A> +where + A: Allocator, +{ + /// Inserts a new element into the table with the hash that was used to + /// obtain the `VacantEntry`. + /// + /// An `OccupiedEntry` is returned for the newly inserted element. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use hashbrown::hash_table::Entry; + /// use hashbrown::{HashTable, DefaultHashBuilder}; + /// use std::hash::BuildHasher; + /// + /// let mut table: HashTable<&str> = HashTable::new(); + /// let hasher = DefaultHashBuilder::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// if let Entry::Vacant(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) { + /// o.insert("poneyland"); + /// } + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&x| x == "poneyland"), + /// Some(&"poneyland") + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + #[inline] + pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> { + let bucket = unsafe { + self.table + .raw + .insert_in_slot(self.hash, self.insert_slot, value) + }; + OccupiedEntry { + hash: self.hash, + bucket, + table: self.table, + } + } + + /// Converts the `VacantEntry` into a mutable reference to the underlying + /// table. + pub fn into_table(self) -> &'a mut HashTable { + self.table + } +} + +/// Type representing the absence of an entry, as returned by [`HashTable::find_entry`]. +/// +/// This type only exists due to [limitations] in Rust's NLL borrow checker. In +/// the future, `find_entry` will return an `Option` and this +/// type will be removed. +/// +/// [limitations]: https://smallcultfollowing.com/babysteps/blog/2018/06/15/mir-based-borrow-check-nll-status-update/#polonius +/// +/// # Examples +/// +/// ``` +/// # #[cfg(feature = "nightly")] +/// # fn test() { +/// use hashbrown::hash_table::{AbsentEntry, Entry}; +/// use hashbrown::{HashTable, DefaultHashBuilder}; +/// use std::hash::BuildHasher; +/// +/// let mut table: HashTable<&str> = HashTable::new(); +/// let hasher = DefaultHashBuilder::default(); +/// let hasher = |val: &_| hasher.hash_one(val); +/// +/// let entry_v: AbsentEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap_err(); +/// entry_v +/// .into_table() +/// .insert_unique(hasher(&"a"), "a", hasher); +/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1); +/// +/// // Nonexistent key (insert) +/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) { +/// Entry::Vacant(view) => { +/// view.insert("b"); +/// } +/// Entry::Occupied(_) => unreachable!(), +/// } +/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2); +/// # } +/// # fn main() { +/// # #[cfg(feature = "nightly")] +/// # test() +/// # } +/// ``` +pub struct AbsentEntry<'a, T, A = Global> +where + A: Allocator, +{ + table: &'a mut HashTable, +} + +impl fmt::Debug for AbsentEntry<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("AbsentEntry") + } +} + +impl<'a, T, A> AbsentEntry<'a, T, A> +where + A: Allocator, +{ + /// Converts the `AbsentEntry` into a mutable reference to the underlying + /// table. + pub fn into_table(self) -> &'a mut HashTable { + self.table + } +} + +/// An iterator over the entries of a `HashTable` in arbitrary order. +/// The iterator element type is `&'a T`. +/// +/// This `struct` is created by the [`iter`] method on [`HashTable`]. See its +/// documentation for more. 
+///
+/// [`iter`]: struct.HashTable.html#method.iter
+/// [`HashTable`]: struct.HashTable.html
+pub struct Iter<'a, T> {
+    inner: RawIter<T>,
+    marker: PhantomData<&'a T>,
+}
+
+impl<T> Default for Iter<'_, T> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn default() -> Self {
+        Iter {
+            inner: Default::default(),
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+    type Item = &'a T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some(bucket) => Some(unsafe { bucket.as_ref() }),
+            None => None,
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+
+    fn fold<B, F>(self, init: B, mut f: F) -> B
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> B,
+    {
+        self.inner
+            .fold(init, |acc, bucket| unsafe { f(acc, bucket.as_ref()) })
+    }
+}
+
+impl<T> ExactSizeIterator for Iter<'_, T> {
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<T> FusedIterator for Iter<'_, T> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<'a, T> Clone for Iter<'a, T> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn clone(&self) -> Iter<'a, T> {
+        Iter {
+            inner: self.inner.clone(),
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.clone()).finish()
+    }
+}
+
+/// A mutable iterator over the entries of a `HashTable` in arbitrary order.
+/// The iterator element type is `&'a mut T`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`HashTable`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.HashTable.html#method.iter_mut
+/// [`HashTable`]: struct.HashTable.html
+pub struct IterMut<'a, T> {
+    inner: RawIter<T>,
+    marker: PhantomData<&'a mut T>,
+}
+
+impl<T> Default for IterMut<'_, T> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn default() -> Self {
+        IterMut {
+            inner: Default::default(),
+            marker: PhantomData,
+        }
+    }
+}
+impl<'a, T> Iterator for IterMut<'a, T> {
+    type Item = &'a mut T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some(bucket) => Some(unsafe { bucket.as_mut() }),
+            None => None,
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+
+    fn fold<B, F>(self, init: B, mut f: F) -> B
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> B,
+    {
+        self.inner
+            .fold(init, |acc, bucket| unsafe { f(acc, bucket.as_mut()) })
+    }
+}
+
+impl<T> ExactSizeIterator for IterMut<'_, T> {
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+impl<T> fmt::Debug for IterMut<'_, T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list()
+            .entries(Iter {
+                inner: self.inner.clone(),
+                marker: PhantomData,
+            })
+            .finish()
+    }
+}
+
+/// An iterator over the entries of a `HashTable` that could match a given hash.
+/// The iterator element type is `&'a T`.
+///
+/// This `struct` is created by the [`iter_hash`] method on [`HashTable`]. See its
+/// documentation for more.
+///
+/// [`iter_hash`]: struct.HashTable.html#method.iter_hash
+/// [`HashTable`]: struct.HashTable.html
+pub struct IterHash<'a, T> {
+    inner: RawIterHash<T>,
+    marker: PhantomData<&'a T>,
+}
+
+impl<T> Default for IterHash<'_, T> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn default() -> Self {
+        IterHash {
+            inner: Default::default(),
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, T> Iterator for IterHash<'a, T> {
+    type Item = &'a T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some(bucket) => Some(unsafe { bucket.as_ref() }),
+            None => None,
+        }
+    }
+
+    fn fold<B, F>(self, init: B, mut f: F) -> B
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> B,
+    {
+        self.inner
+            .fold(init, |acc, bucket| unsafe { f(acc, bucket.as_ref()) })
+    }
+}
+
+impl<T> FusedIterator for IterHash<'_, T> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<'a, T> Clone for IterHash<'a, T> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn clone(&self) -> IterHash<'a, T> {
+        IterHash {
+            inner: self.inner.clone(),
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<T> fmt::Debug for IterHash<'_, T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.clone()).finish()
+    }
+}
+
+/// A mutable iterator over the entries of a `HashTable` that could match a given hash.
+/// The iterator element type is `&'a mut T`.
+///
+/// This `struct` is created by the [`iter_hash_mut`] method on [`HashTable`]. See its
+/// documentation for more.
+///
+/// [`iter_hash_mut`]: struct.HashTable.html#method.iter_hash_mut
+/// [`HashTable`]: struct.HashTable.html
+pub struct IterHashMut<'a, T> {
+    inner: RawIterHash<T>,
+    marker: PhantomData<&'a mut T>,
+}
+
+impl<T> Default for IterHashMut<'_, T> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn default() -> Self {
+        IterHashMut {
+            inner: Default::default(),
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, T> Iterator for IterHashMut<'a, T> {
+    type Item = &'a mut T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some(bucket) => Some(unsafe { bucket.as_mut() }),
+            None => None,
+        }
+    }
+
+    fn fold<B, F>(self, init: B, mut f: F) -> B
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> B,
+    {
+        self.inner
+            .fold(init, |acc, bucket| unsafe { f(acc, bucket.as_mut()) })
+    }
+}
+
+impl<T> FusedIterator for IterHashMut<'_, T> {}
+
+impl<T> fmt::Debug for IterHashMut<'_, T>
+where
+    T: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list()
+            .entries(IterHash {
+                inner: self.inner.clone(),
+                marker: PhantomData,
+            })
+            .finish()
+    }
+}
+
+/// An owning iterator over the entries of a `HashTable` in arbitrary order.
+/// The iterator element type is `T`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashTable`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+/// The table cannot be used after calling that method.
+///
+/// [`into_iter`]: struct.HashTable.html#method.into_iter
+/// [`HashTable`]: struct.HashTable.html
+/// [`IntoIterator`]: https://doc.rust-lang.org/core/iter/trait.IntoIterator.html
+pub struct IntoIter<T, A = Global>
+where
+    A: Allocator,
+{
+    inner: RawIntoIter<T, A>,
+}
+
+impl<T, A: Allocator + Default> Default for IntoIter<T, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn default() -> Self {
+        IntoIter {
+            inner: Default::default(),
+        }
+    }
+}
+
+impl<T, A> Iterator for IntoIter<T, A>
+where
+    A: Allocator,
+{
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner.next()
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+
+    fn fold<B, F>(self, init: B, f: F) -> B
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> B,
+    {
+        self.inner.fold(init, f)
+    }
+}
+
+impl<T, A> ExactSizeIterator for IntoIter<T, A>
+where
+    A: Allocator,
+{
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<T, A> FusedIterator for IntoIter<T, A> where A: Allocator {}
+
+impl<T, A> fmt::Debug for IntoIter<T, A>
+where
+    T: fmt::Debug,
+    A: Allocator,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list()
+            .entries(Iter {
+                inner: self.inner.iter(),
+                marker: PhantomData,
+            })
+            .finish()
+    }
+}
+
+/// A draining iterator over the items of a `HashTable`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashTable`].
+/// See its documentation for more.
+///
+/// [`HashTable`]: struct.HashTable.html
+/// [`drain`]: struct.HashTable.html#method.drain
+pub struct Drain<'a, T, A: Allocator = Global> {
+    inner: RawDrain<'a, T, A>,
+}
+
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.inner.next()
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+
+    fn fold<B, F>(self, init: B, f: F) -> B
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> B,
+    {
+        self.inner.fold(init, f)
+    }
+}
+
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list()
+            .entries(Iter {
+                inner: self.inner.iter(),
+                marker: PhantomData,
+            })
+            .finish()
+    }
+}
+
+/// A draining iterator over the entries of a `HashTable` which satisfy the predicate `f`.
+///
+/// This `struct` is created by [`HashTable::extract_if`]. See its
+/// documentation for more.
+#[must_use = "Iterators are lazy unless consumed"]
+pub struct ExtractIf<'a, T, F, A: Allocator = Global>
+where
+    F: FnMut(&mut T) -> bool,
+{
+    f: F,
+    inner: RawExtractIf<'a, T, A>,
+}
+
+impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
+where
+    F: FnMut(&mut T) -> bool,
+{
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<T> {
+        self.inner.next(|val| (self.f)(val))
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, self.inner.iter.size_hint().1)
+    }
+}
+
+impl<T, F, A: Allocator> FusedIterator for ExtractIf<'_, T, F, A> where F: FnMut(&mut T) -> bool {}
+
+#[cfg(test)]
+mod tests {
+    use super::HashTable;
+
+    #[test]
+    fn test_allocation_info() {
+        assert_eq!(HashTable::<()>::new().allocation_size(), 0);
+        assert_eq!(HashTable::<u32>::new().allocation_size(), 0);
+        assert!(HashTable::<u32>::with_capacity(1).allocation_size() > core::mem::size_of::<u32>());
+    }
+}
diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/equivalent_trait.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/equivalent_trait.rs
new file mode 100644
index 000000000000..713dddd53c7c
--- /dev/null
+++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/equivalent_trait.rs
@@ -0,0 +1,53 @@
+use hashbrown::Equivalent;
+use hashbrown::HashMap;
+
+use std::hash::Hash;
+
+#[derive(Debug, Hash)]
+pub struct Pair<A, B>(pub A, pub B);
+
+impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
+where
+    C: PartialEq<A>,
+    D: PartialEq<B>,
+{
+    fn eq(&self, rhs: &(A, B)) -> bool {
+        self.0 == rhs.0 && self.1 == rhs.1
+    }
+}
+
+impl<A, B, X> Equivalent<X> for Pair<A, B>
+where
+    Pair<A, B>: PartialEq<X>,
+    A: Hash + Eq,
+    B: Hash + Eq,
+{
+    fn equivalent(&self, other: &X) -> bool {
+        *self == *other
+    }
+}
+
+#[test]
+fn test_lookup() {
+    let s = String::from;
+    let mut map = HashMap::new();
+    map.insert((s("a"), s("b")), 1);
+    map.insert((s("a"), s("x")), 2);
+
+    assert!(map.contains_key(&Pair("a", "b")));
+    assert!(!map.contains_key(&Pair("b", "a")));
+}
+
+#[test]
+fn test_string_str() {
+    let s = String::from;
+    let mut map = HashMap::new();
+    map.insert(s("a"), 1);
+    map.insert(s("b"), 2);
+    map.insert(s("x"), 3);
+    map.insert(s("y"), 4);
+
+    assert!(map.contains_key("a"));
+    assert!(!map.contains_key("z"));
+    assert_eq!(map.remove("b"), Some(2));
+}
diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/hasher.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/hasher.rs
new file mode 100644
index 000000000000..223737844429
--- /dev/null
+++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/hasher.rs
@@ -0,0 +1,65 @@
+//! Sanity check that alternate hashers work correctly.
+
+#![cfg(not(miri))] // FIXME: takes too long
+
+use hashbrown::HashSet;
+use std::hash::{BuildHasher, BuildHasherDefault, Hasher};
+
+fn check<S: BuildHasher + Default>() {
+    let range = 0..1_000;
+
+    let mut set = HashSet::<i32, S>::default();
+    set.extend(range.clone());
+
+    assert!(!set.contains(&i32::MIN));
+    assert!(!set.contains(&(range.start - 1)));
+    for i in range.clone() {
+        assert!(set.contains(&i));
+    }
+    assert!(!set.contains(&range.end));
+    assert!(!set.contains(&i32::MAX));
+}
+
+/// Use hashbrown's default hasher.
+#[test]
+fn default() {
+    check::<hashbrown::DefaultHashBuilder>();
+}
+
+/// Use std's default hasher.
+#[test]
+fn random_state() {
+    check::<std::collections::hash_map::RandomState>();
+}
+
+/// Use a constant 0 hash.
+#[test]
+fn zero() {
+    #[derive(Default)]
+    struct ZeroHasher;
+
+    impl Hasher for ZeroHasher {
+        fn finish(&self) -> u64 {
+            0
+        }
+        fn write(&mut self, _: &[u8]) {}
+    }
+
+    check::<BuildHasherDefault<ZeroHasher>>();
+}
+
+/// Use a constant maximum hash.
+#[test] +fn max() { + #[derive(Default)] + struct MaxHasher; + + impl Hasher for MaxHasher { + fn finish(&self) -> u64 { + u64::MAX + } + fn write(&mut self, _: &[u8]) {} + } + + check::>(); +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/rayon.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/rayon.rs new file mode 100644 index 000000000000..d55e5a9804dd --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/rayon.rs @@ -0,0 +1,535 @@ +#![cfg(feature = "rayon")] + +#[macro_use] +extern crate lazy_static; + +use hashbrown::{HashMap, HashSet}; +use rayon::iter::{ + IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelExtend, + ParallelIterator, +}; + +macro_rules! assert_eq3 { + ($e1:expr, $e2:expr, $e3:expr) => {{ + assert_eq!($e1, $e2); + assert_eq!($e1, $e3); + assert_eq!($e2, $e3); + }}; +} + +lazy_static! { + static ref MAP_EMPTY: HashMap = HashMap::new(); + static ref MAP: HashMap = { + let mut m = HashMap::new(); + m.insert('b', 20); + m.insert('a', 10); + m.insert('c', 30); + m.insert('e', 50); + m.insert('f', 60); + m.insert('d', 40); + m + }; +} + +#[test] +fn map_seq_par_equivalence_iter_empty() { + let vec_seq = MAP_EMPTY.iter().collect::>(); + let vec_par = MAP_EMPTY.par_iter().collect::>(); + + assert_eq3!(vec_seq, vec_par, []); +} + +#[test] +fn map_seq_par_equivalence_iter() { + let mut vec_seq = MAP.iter().collect::>(); + let mut vec_par = MAP.par_iter().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [ + (&'a', &10), + (&'b', &20), + (&'c', &30), + (&'d', &40), + (&'e', &50), + (&'f', &60), + ]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_keys_empty() { + let vec_seq = MAP_EMPTY.keys().collect::>(); + let vec_par = MAP_EMPTY.par_keys().collect::>(); + + let expected: [&char; 0] = []; + + assert_eq3!(vec_seq, vec_par, expected); +} + +#[test] +fn map_seq_par_equivalence_keys() { + let mut vec_seq = MAP.keys().collect::>(); + let mut vec_par = MAP.par_keys().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f']; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_values_empty() { + let vec_seq = MAP_EMPTY.values().collect::>(); + let vec_par = MAP_EMPTY.par_values().collect::>(); + + let expected: [&u32; 0] = []; + + assert_eq3!(vec_seq, vec_par, expected); +} + +#[test] +fn map_seq_par_equivalence_values() { + let mut vec_seq = MAP.values().collect::>(); + let mut vec_par = MAP.par_values().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [&10, &20, &30, &40, &50, &60]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_iter_mut_empty() { + let mut map1 = MAP_EMPTY.clone(); + let mut map2 = MAP_EMPTY.clone(); + + let vec_seq = map1.iter_mut().collect::>(); + let vec_par = map2.par_iter_mut().collect::>(); + + assert_eq3!(vec_seq, vec_par, []); +} + +#[test] +fn map_seq_par_equivalence_iter_mut() { + let mut map1 = MAP.clone(); + let mut map2 = MAP.clone(); + + let mut vec_seq = map1.iter_mut().collect::>(); + let 
mut vec_par = map2.par_iter_mut().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [ + (&'a', &mut 10), + (&'b', &mut 20), + (&'c', &mut 30), + (&'d', &mut 40), + (&'e', &mut 50), + (&'f', &mut 60), + ]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_values_mut_empty() { + let mut map1 = MAP_EMPTY.clone(); + let mut map2 = MAP_EMPTY.clone(); + + let vec_seq = map1.values_mut().collect::>(); + let vec_par = map2.par_values_mut().collect::>(); + + let expected: [&u32; 0] = []; + + assert_eq3!(vec_seq, vec_par, expected); +} + +#[test] +fn map_seq_par_equivalence_values_mut() { + let mut map1 = MAP.clone(); + let mut map2 = MAP.clone(); + + let mut vec_seq = map1.values_mut().collect::>(); + let mut vec_par = map2.par_values_mut().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [&mut 10, &mut 20, &mut 30, &mut 40, &mut 50, &mut 60]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_into_iter_empty() { + let vec_seq = MAP_EMPTY.clone().into_iter().collect::>(); + let vec_par = MAP_EMPTY.clone().into_par_iter().collect::>(); + + assert_eq3!(vec_seq, vec_par, []); +} + +#[test] +fn map_seq_par_equivalence_into_iter() { + let mut vec_seq = MAP.clone().into_iter().collect::>(); + let mut vec_par = MAP.clone().into_par_iter().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [ + ('a', 10), + ('b', 20), + ('c', 30), + ('d', 40), + ('e', 50), + ('f', 60), + ]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +lazy_static! { + static ref MAP_VEC_EMPTY: Vec<(char, u32)> = vec![]; + static ref MAP_VEC: Vec<(char, u32)> = vec![ + ('b', 20), + ('a', 10), + ('c', 30), + ('e', 50), + ('f', 60), + ('d', 40), + ]; +} + +#[test] +fn map_seq_par_equivalence_collect_empty() { + let map_expected = MAP_EMPTY.clone(); + let map_seq = MAP_VEC_EMPTY.clone().into_iter().collect::>(); + let map_par = MAP_VEC_EMPTY + .clone() + .into_par_iter() + .collect::>(); + + assert_eq!(map_seq, map_par); + assert_eq!(map_seq, map_expected); + assert_eq!(map_par, map_expected); +} + +#[test] +fn map_seq_par_equivalence_collect() { + let map_expected = MAP.clone(); + let map_seq = MAP_VEC.clone().into_iter().collect::>(); + let map_par = MAP_VEC.clone().into_par_iter().collect::>(); + + assert_eq!(map_seq, map_par); + assert_eq!(map_seq, map_expected); + assert_eq!(map_par, map_expected); +} + +lazy_static! 
{ + static ref MAP_EXISTING_EMPTY: HashMap = HashMap::new(); + static ref MAP_EXISTING: HashMap = { + let mut m = HashMap::new(); + m.insert('b', 20); + m.insert('a', 10); + m + }; + static ref MAP_EXTENSION_EMPTY: Vec<(char, u32)> = vec![]; + static ref MAP_EXTENSION: Vec<(char, u32)> = vec![('c', 30), ('e', 50), ('f', 60), ('d', 40),]; +} + +#[test] +fn map_seq_par_equivalence_existing_empty_extend_empty() { + let expected = HashMap::new(); + let mut map_seq = MAP_EXISTING_EMPTY.clone(); + let mut map_par = MAP_EXISTING_EMPTY.clone(); + + map_seq.extend(MAP_EXTENSION_EMPTY.iter().copied()); + map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().copied()); + + assert_eq3!(map_seq, map_par, expected); +} + +#[test] +fn map_seq_par_equivalence_existing_empty_extend() { + let expected = MAP_EXTENSION.iter().copied().collect::>(); + let mut map_seq = MAP_EXISTING_EMPTY.clone(); + let mut map_par = MAP_EXISTING_EMPTY.clone(); + + map_seq.extend(MAP_EXTENSION.iter().copied()); + map_par.par_extend(MAP_EXTENSION.par_iter().copied()); + + assert_eq3!(map_seq, map_par, expected); +} + +#[test] +fn map_seq_par_equivalence_existing_extend_empty() { + let expected = MAP_EXISTING.clone(); + let mut map_seq = MAP_EXISTING.clone(); + let mut map_par = MAP_EXISTING.clone(); + + map_seq.extend(MAP_EXTENSION_EMPTY.iter().copied()); + map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().copied()); + + assert_eq3!(map_seq, map_par, expected); +} + +#[test] +fn map_seq_par_equivalence_existing_extend() { + let expected = MAP.clone(); + let mut map_seq = MAP_EXISTING.clone(); + let mut map_par = MAP_EXISTING.clone(); + + map_seq.extend(MAP_EXTENSION.iter().copied()); + map_par.par_extend(MAP_EXTENSION.par_iter().copied()); + + assert_eq3!(map_seq, map_par, expected); +} + +lazy_static! { + static ref SET_EMPTY: HashSet = HashSet::new(); + static ref SET: HashSet = { + let mut s = HashSet::new(); + s.insert('b'); + s.insert('a'); + s.insert('c'); + s.insert('e'); + s.insert('f'); + s.insert('d'); + s + }; +} + +#[test] +fn set_seq_par_equivalence_iter_empty() { + let vec_seq = SET_EMPTY.iter().collect::>(); + let vec_par = SET_EMPTY.par_iter().collect::>(); + + let expected: [&char; 0] = []; + + assert_eq3!(vec_seq, vec_par, expected); +} + +#[test] +fn set_seq_par_equivalence_iter() { + let mut vec_seq = SET.iter().collect::>(); + let mut vec_par = SET.par_iter().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f']; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn set_seq_par_equivalence_into_iter_empty() { + let vec_seq = SET_EMPTY.clone().into_iter().collect::>(); + let vec_par = SET_EMPTY.clone().into_par_iter().collect::>(); + + // Work around type inference failure introduced by rend dev-dependency. + let empty: [char; 0] = []; + assert_eq3!(vec_seq, vec_par, empty); +} + +#[test] +fn set_seq_par_equivalence_into_iter() { + let mut vec_seq = SET.clone().into_iter().collect::>(); + let mut vec_par = SET.clone().into_par_iter().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = ['a', 'b', 'c', 'd', 'e', 'f']; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +lazy_static! 
{ + static ref SET_VEC_EMPTY: Vec = vec![]; + static ref SET_VEC: Vec = vec!['b', 'a', 'c', 'e', 'f', 'd',]; +} + +#[test] +fn set_seq_par_equivalence_collect_empty() { + let set_expected = SET_EMPTY.clone(); + let set_seq = SET_VEC_EMPTY.clone().into_iter().collect::>(); + let set_par = SET_VEC_EMPTY + .clone() + .into_par_iter() + .collect::>(); + + assert_eq!(set_seq, set_par); + assert_eq!(set_seq, set_expected); + assert_eq!(set_par, set_expected); +} + +#[test] +fn set_seq_par_equivalence_collect() { + let set_expected = SET.clone(); + let set_seq = SET_VEC.clone().into_iter().collect::>(); + let set_par = SET_VEC.clone().into_par_iter().collect::>(); + + assert_eq!(set_seq, set_par); + assert_eq!(set_seq, set_expected); + assert_eq!(set_par, set_expected); +} + +lazy_static! { + static ref SET_EXISTING_EMPTY: HashSet = HashSet::new(); + static ref SET_EXISTING: HashSet = { + let mut s = HashSet::new(); + s.insert('b'); + s.insert('a'); + s + }; + static ref SET_EXTENSION_EMPTY: Vec = vec![]; + static ref SET_EXTENSION: Vec = vec!['c', 'e', 'f', 'd',]; +} + +#[test] +fn set_seq_par_equivalence_existing_empty_extend_empty() { + let expected = HashSet::new(); + let mut set_seq = SET_EXISTING_EMPTY.clone(); + let mut set_par = SET_EXISTING_EMPTY.clone(); + + set_seq.extend(SET_EXTENSION_EMPTY.iter().copied()); + set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().copied()); + + assert_eq3!(set_seq, set_par, expected); +} + +#[test] +fn set_seq_par_equivalence_existing_empty_extend() { + let expected = SET_EXTENSION.iter().copied().collect::>(); + let mut set_seq = SET_EXISTING_EMPTY.clone(); + let mut set_par = SET_EXISTING_EMPTY.clone(); + + set_seq.extend(SET_EXTENSION.iter().copied()); + set_par.par_extend(SET_EXTENSION.par_iter().copied()); + + assert_eq3!(set_seq, set_par, expected); +} + +#[test] +fn set_seq_par_equivalence_existing_extend_empty() { + let expected = SET_EXISTING.clone(); + let mut set_seq = SET_EXISTING.clone(); + let mut set_par = SET_EXISTING.clone(); + + set_seq.extend(SET_EXTENSION_EMPTY.iter().copied()); + set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().copied()); + + assert_eq3!(set_seq, set_par, expected); +} + +#[test] +fn set_seq_par_equivalence_existing_extend() { + let expected = SET.clone(); + let mut set_seq = SET_EXISTING.clone(); + let mut set_par = SET_EXISTING.clone(); + + set_seq.extend(SET_EXTENSION.iter().copied()); + set_par.par_extend(SET_EXTENSION.par_iter().copied()); + + assert_eq3!(set_seq, set_par, expected); +} + +lazy_static! 
{ + static ref SET_A: HashSet = ['a', 'b', 'c', 'd'].iter().copied().collect(); + static ref SET_B: HashSet = ['a', 'b', 'e', 'f'].iter().copied().collect(); + static ref SET_DIFF_AB: HashSet = ['c', 'd'].iter().copied().collect(); + static ref SET_DIFF_BA: HashSet = ['e', 'f'].iter().copied().collect(); + static ref SET_SYMM_DIFF_AB: HashSet = ['c', 'd', 'e', 'f'].iter().copied().collect(); + static ref SET_INTERSECTION_AB: HashSet = ['a', 'b'].iter().copied().collect(); + static ref SET_UNION_AB: HashSet = + ['a', 'b', 'c', 'd', 'e', 'f'].iter().copied().collect(); +} + +#[test] +fn set_seq_par_equivalence_difference() { + let diff_ab_seq = SET_A.difference(&*SET_B).copied().collect::>(); + let diff_ab_par = SET_A + .par_difference(&*SET_B) + .copied() + .collect::>(); + + assert_eq3!(diff_ab_seq, diff_ab_par, *SET_DIFF_AB); + + let diff_ba_seq = SET_B.difference(&*SET_A).copied().collect::>(); + let diff_ba_par = SET_B + .par_difference(&*SET_A) + .copied() + .collect::>(); + + assert_eq3!(diff_ba_seq, diff_ba_par, *SET_DIFF_BA); +} + +#[test] +fn set_seq_par_equivalence_symmetric_difference() { + let symm_diff_ab_seq = SET_A + .symmetric_difference(&*SET_B) + .copied() + .collect::>(); + let symm_diff_ab_par = SET_A + .par_symmetric_difference(&*SET_B) + .copied() + .collect::>(); + + assert_eq3!(symm_diff_ab_seq, symm_diff_ab_par, *SET_SYMM_DIFF_AB); +} + +#[test] +fn set_seq_par_equivalence_intersection() { + let intersection_ab_seq = SET_A.intersection(&*SET_B).copied().collect::>(); + let intersection_ab_par = SET_A + .par_intersection(&*SET_B) + .copied() + .collect::>(); + + assert_eq3!( + intersection_ab_seq, + intersection_ab_par, + *SET_INTERSECTION_AB + ); +} + +#[test] +fn set_seq_par_equivalence_union() { + let union_ab_seq = SET_A.union(&*SET_B).copied().collect::>(); + let union_ab_par = SET_A.par_union(&*SET_B).copied().collect::>(); + + assert_eq3!(union_ab_seq, union_ab_par, *SET_UNION_AB); +} diff --git a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/serde.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/serde.rs new file mode 100644 index 000000000000..a642348b3b3b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/serde.rs @@ -0,0 +1,65 @@ +#![cfg(feature = "serde")] + +use core::hash::BuildHasherDefault; +use fnv::FnvHasher; +use hashbrown::{HashMap, HashSet}; +use serde_test::{assert_tokens, Token}; + +// We use FnvHash for this test because we rely on the ordering +type FnvHashMap = HashMap>; +type FnvHashSet = HashSet>; + +#[test] +fn map_serde_tokens_empty() { + let map = FnvHashMap::::default(); + + assert_tokens(&map, &[Token::Map { len: Some(0) }, Token::MapEnd]); +} + +#[test] +fn map_serde_tokens() { + let mut map = FnvHashMap::default(); + map.insert('b', 20); + map.insert('a', 10); + map.insert('c', 30); + + assert_tokens( + &map, + &[ + Token::Map { len: Some(3) }, + Token::Char('a'), + Token::I32(10), + Token::Char('c'), + Token::I32(30), + Token::Char('b'), + Token::I32(20), + Token::MapEnd, + ], + ); +} + +#[test] +fn set_serde_tokens_empty() { + let set = FnvHashSet::::default(); + + assert_tokens(&set, &[Token::Seq { len: Some(0) }, Token::SeqEnd]); +} + +#[test] +fn set_serde_tokens() { + let mut set = FnvHashSet::default(); + set.insert(20); + set.insert(10); + set.insert(30); + + assert_tokens( + &set, + &[ + Token::Seq { len: Some(3) }, + Token::I32(30), + Token::I32(20), + Token::I32(10), + Token::SeqEnd, + ], + ); +} diff --git 
a/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/set.rs b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/set.rs new file mode 100644 index 000000000000..86ec9647664b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/hashbrown-0.15.1/tests/set.rs @@ -0,0 +1,34 @@ +#![cfg(not(miri))] // FIXME: takes too long + +use hashbrown::HashSet; +use rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng}; +use std::iter; + +#[test] +fn test_hashset_insert_remove() { + let mut m: HashSet> = HashSet::new(); + let seed = u64::from_le_bytes(*b"testseed"); + + let rng = &mut SmallRng::seed_from_u64(seed); + let tx: Vec> = iter::repeat_with(|| { + rng.sample_iter(&Alphanumeric) + .take(32) + .map(char::from) + .collect() + }) + .take(4096) + .collect(); + + // more readable with explicit `true` / `false` + #[allow(clippy::bool_assert_comparison)] + for _ in 0..32 { + for x in &tx { + assert_eq!(m.contains(x), false); + assert_eq!(m.insert(x.clone()), true); + } + for (i, x) in tx.iter().enumerate() { + println!("removing {i} {x:?}"); + assert_eq!(m.remove(x), true); + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.cargo_vcs_info.json new file mode 100644 index 000000000000..b13a75f9a64a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "bf0362ba25ad3cade401e8314c9ab7aafc638db8" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.github/workflows/ci.yml b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.github/workflows/ci.yml new file mode 100644 index 000000000000..98c7750797dd --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.github/workflows/ci.yml @@ -0,0 +1,142 @@ +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + merge_group: + +name: CI + +env: + CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: 0 + +jobs: + tests: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - rust: 1.63.0 # MSRV + features: + - rust: stable + features: arbitrary + - rust: stable + features: quickcheck + - rust: stable + features: rayon + - rust: stable + features: rustc-rayon + - rust: stable + features: serde + - rust: stable + features: borsh + - rust: stable + features: std + - rust: beta + features: + - rust: nightly + bench: test build benchmarks + + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + if: matrix.rust == '1.63.0' + with: + path: ~/.cargo/registry/index + key: cargo-git-index + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + - name: Tests + run: | + cargo build --verbose --features "${{ matrix.features }}" + cargo doc --verbose --features "${{ matrix.features }}" + cargo test --verbose --features "${{ matrix.features }}" + cargo test --release --verbose --features "${{ matrix.features }}" + - name: Tests (serde) + if: matrix.features == 'serde' + run: | + cargo test 
--verbose -p test-serde + - name: Test run benchmarks + if: matrix.bench != '' + run: cargo test -v --benches + + nostd_build: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - rust: 1.63.0 + target: thumbv6m-none-eabi + - rust: stable + target: thumbv6m-none-eabi + + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + if: matrix.rust == '1.63.0' + with: + path: ~/.cargo/registry/index + key: cargo-git-index + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.rust }} + target: ${{ matrix.target }} + - name: Tests + run: | + cargo build -vv --target=${{ matrix.target }} --no-default-features + cargo build -v -p test-nostd --target=${{ matrix.target }} + + clippy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@beta + with: + components: clippy + - run: cargo clippy --all-features + + miri: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + with: + components: miri + - run: cargo miri test + + minimal-versions: + name: Check MSRV and minimal-versions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + with: + path: ~/.cargo/registry/index + key: cargo-git-index + - uses: dtolnay/rust-toolchain@nightly + - uses: dtolnay/rust-toolchain@1.63.0 # MSRV + - uses: taiki-e/install-action@v2 + with: + tool: cargo-hack + - run: cargo +nightly hack generate-lockfile --remove-dev-deps -Z direct-minimal-versions + - name: Build (nightly) + run: cargo +nightly build --verbose --all-features + - name: Build (MSRV) + run: cargo build --verbose --features arbitrary,quickcheck,serde,rayon + + # One job that "summarizes" the success state of this pipeline. This can then be added to branch + # protection, rather than having to add each job separately. + success: + name: Success + runs-on: ubuntu-latest + needs: [tests, nostd_build, clippy, miri, minimal-versions] + # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency + # failed" as success. So we have to do some contortions to ensure the job fails if any of its + # dependencies fails. + if: always() # make sure this is never "skipped" + steps: + # Manually check the status of all dependencies. `if: failure()` does not work. 
+ - name: check if any dependency failed + run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.gitignore b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.gitignore similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.gitignore rename to third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.gitignore diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.rustfmt.toml b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.rustfmt.toml new file mode 100644 index 000000000000..3a26366d4da6 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/.rustfmt.toml @@ -0,0 +1 @@ +edition = "2021" diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/Cargo.toml new file mode 100644 index 000000000000..5104c9397bf9 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/Cargo.toml @@ -0,0 +1,152 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.63" +name = "indexmap" +version = "2.6.0" +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "A hash table with consistent order and fast iteration." 
+documentation = "https://docs.rs/indexmap/" +readme = "README.md" +keywords = [ + "hashmap", + "no_std", +] +categories = [ + "data-structures", + "no-std", +] +license = "Apache-2.0 OR MIT" +repository = "https://github.com/indexmap-rs/indexmap" + +[package.metadata.docs.rs] +features = [ + "arbitrary", + "quickcheck", + "serde", + "borsh", + "rayon", +] +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[package.metadata.release] +allow-branch = ["master"] +sign-tag = true +tag-name = "{{version}}" + +[profile.bench] +debug = 2 + +[lib] +name = "indexmap" +path = "src/lib.rs" +bench = false + +[[test]] +name = "equivalent_trait" +path = "tests/equivalent_trait.rs" + +[[test]] +name = "macros_full_path" +path = "tests/macros_full_path.rs" + +[[test]] +name = "quick" +path = "tests/quick.rs" + +[[test]] +name = "tests" +path = "tests/tests.rs" + +[[bench]] +name = "bench" +path = "benches/bench.rs" + +[[bench]] +name = "faststring" +path = "benches/faststring.rs" + +[dependencies.arbitrary] +version = "1.0" +optional = true +default-features = false + +[dependencies.borsh] +version = "1.2" +optional = true +default-features = false + +[dependencies.equivalent] +version = "1.0" +default-features = false + +[dependencies.hashbrown] +version = "0.15.0" +default-features = false + +[dependencies.quickcheck] +version = "1.0" +optional = true +default-features = false + +[dependencies.rayon] +version = "1.5.3" +optional = true + +[dependencies.rustc-rayon] +version = "0.5" +optional = true +package = "rustc-rayon" + +[dependencies.serde] +version = "1.0" +optional = true +default-features = false + +[dev-dependencies.fnv] +version = "1.0" + +[dev-dependencies.fxhash] +version = "0.2.1" + +[dev-dependencies.itertools] +version = "0.13" + +[dev-dependencies.lazy_static] +version = "1.3" + +[dev-dependencies.quickcheck] +version = "1.0" +default-features = false + +[dev-dependencies.rand] +version = "0.8" +features = ["small_rng"] + +[dev-dependencies.serde_derive] +version = "1.0" + +[features] +default = ["std"] +std = [] +test_debug = [] + +[lints.clippy] +style = "allow" diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/Cargo.toml.orig new file mode 100644 index 000000000000..959ea4c6dd9d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/Cargo.toml.orig @@ -0,0 +1,65 @@ +[package] +name = "indexmap" +edition = "2021" +version = "2.6.0" +documentation = "https://docs.rs/indexmap/" +repository = "https://github.com/indexmap-rs/indexmap" +license = "Apache-2.0 OR MIT" +description = "A hash table with consistent order and fast iteration." +keywords = ["hashmap", "no_std"] +categories = ["data-structures", "no-std"] +rust-version = "1.63" + +[lib] +bench = false + +[dependencies] +equivalent = { version = "1.0", default-features = false } + +arbitrary = { version = "1.0", optional = true, default-features = false } +quickcheck = { version = "1.0", optional = true, default-features = false } +serde = { version = "1.0", optional = true, default-features = false } +borsh = { version = "1.2", optional = true, default-features = false } +rayon = { version = "1.5.3", optional = true } + +# Internal feature, only used when building as part of rustc, +# not part of the stable interface of this crate. 
+rustc-rayon = { package = "rustc-rayon", version = "0.5", optional = true } + +[dependencies.hashbrown] +version = "0.15.0" +default-features = false + +[dev-dependencies] +itertools = "0.13" +rand = {version = "0.8", features = ["small_rng"] } +quickcheck = { version = "1.0", default-features = false } +fnv = "1.0" +lazy_static = "1.3" +fxhash = "0.2.1" +serde_derive = "1.0" + +[features] +default = ["std"] +std = [] + +# for testing only, of course +test_debug = [] + +[profile.bench] +debug = true + +[package.metadata.release] +allow-branch = ["master"] +sign-tag = true +tag-name = "{{version}}" + +[package.metadata.docs.rs] +features = ["arbitrary", "quickcheck", "serde", "borsh", "rayon"] +rustdoc-args = ["--cfg", "docsrs"] + +[workspace] +members = ["test-nostd", "test-serde"] + +[lints.clippy] +style = "allow" diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/LICENSE-APACHE similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/LICENSE-APACHE rename to third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/LICENSE-APACHE diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/LICENSE-MIT new file mode 100644 index 000000000000..8b8181068b3c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016--2017 + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/README.md b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/README.md new file mode 100644 index 000000000000..2585b232f820 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/README.md @@ -0,0 +1,59 @@ +# indexmap + +[![build status](https://github.com/indexmap-rs/indexmap/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/indexmap-rs/indexmap/actions) +[![crates.io](https://img.shields.io/crates/v/indexmap.svg)](https://crates.io/crates/indexmap) +[![docs](https://docs.rs/indexmap/badge.svg)](https://docs.rs/indexmap) +[![rustc](https://img.shields.io/badge/rust-1.63%2B-orange.svg)](https://img.shields.io/badge/rust-1.63%2B-orange.svg) + +A pure-Rust hash table which preserves (in a limited sense) insertion order. 
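A minimal sketch of the ordering and index-lookup behaviour described here, assuming `indexmap` as a dependency (illustrative only, not taken from the vendored crate):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new();
    map.insert("b", 20);
    map.insert("a", 10);
    map.insert("c", 30);

    // Iteration follows insertion order, not hash order.
    let keys: Vec<&str> = map.keys().copied().collect();
    assert_eq!(keys, ["b", "a", "c"]);

    // Entries can also be addressed by numerical index.
    assert_eq!(map.get_index(1), Some((&"a", &10)));
    assert_eq!(map.get_index_of("c"), Some(2));
}
```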
+ +This crate implements compact map and set data-structures, +where the iteration order of the keys is independent from their hash or +value. It preserves insertion order (except after removals), and it +allows lookup of entries by either hash table key or numerical index. + +Note: this crate was originally released under the name `ordermap`, +but it was renamed to `indexmap` to better reflect its features. +The [`ordermap`](https://crates.io/crates/ordermap) crate now exists +as a wrapper over `indexmap` with stronger ordering properties. + +# Background + +This was inspired by Python 3.6's new dict implementation (which remembers +the insertion order and is fast to iterate, and is compact in memory). + +Some of those features were translated to Rust, and some were not. The result +was indexmap, a hash table that has following properties: + +- Order is **independent of hash function** and hash values of keys. +- Fast to iterate. +- Indexed in compact space. +- Preserves insertion order **as long** as you don't call `.remove()`, + `.swap_remove()`, or other methods that explicitly change order. + The alternate `.shift_remove()` does preserve relative order. +- Uses hashbrown for the inner table, just like Rust's libstd `HashMap` does. + +## Performance + +`IndexMap` derives a couple of performance facts directly from how it is constructed, +which is roughly: + +> A raw hash table of key-value indices, and a vector of key-value pairs. + +- Iteration is very fast since it is on the dense key-values. +- Removal is fast since it moves memory areas only in the table, + and uses a single swap in the vector. +- Lookup is fast-ish because the initial 7-bit hash lookup uses SIMD, and indices are + densely stored. Lookup also is slow-ish since the actual key-value pairs are stored + separately. (Visible when cpu caches size is limiting.) + +- In practice, `IndexMap` has been tested out as the hashmap in rustc in [PR45282] and + the performance was roughly on par across the whole workload. +- If you want the properties of `IndexMap`, or its strongest performance points + fits your workload, it might be the best hash table implementation. + +[PR45282]: https://github.com/rust-lang/rust/pull/45282 + +# Recent Changes + +See [RELEASES.md](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md). diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/RELEASES.md b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/RELEASES.md new file mode 100644 index 000000000000..b1f7a5191a7a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/RELEASES.md @@ -0,0 +1,531 @@ +# Releases + +## 2.6.0 (2024-10-01) + +- Implemented `Clone` for `map::IntoIter` and `set::IntoIter`. +- Updated the `hashbrown` dependency to version 0.15. + +## 2.5.0 (2024-08-30) + +- Added an `insert_before` method to `IndexMap` and `IndexSet`, as an + alternative to `shift_insert` with different behavior on existing entries. +- Added `first_entry` and `last_entry` methods to `IndexMap`. +- Added `From` implementations between `IndexedEntry` and `OccupiedEntry`. + +## 2.4.0 (2024-08-13) + +- Added methods `IndexMap::append` and `IndexSet::append`, moving all items from + one map or set into another, and leaving the original capacity for reuse. + +## 2.3.0 (2024-07-31) + +- Added trait `MutableEntryKey` for opt-in mutable access to map entry keys. +- Added method `MutableKeys::iter_mut2` for opt-in mutable iteration of map + keys and values. 
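A small, hedged illustration of the `append` behaviour noted for 2.4.0 above (a sketch assuming indexmap 2.4 or later with default features; not code from the crate itself):

```rust
use indexmap::IndexMap;

fn main() {
    let mut dst = IndexMap::from([("a", 1), ("b", 2)]);
    let mut src = IndexMap::from([("c", 3), ("d", 4)]);

    // Moves every entry out of `src`, appending them to the end of `dst`
    // in `src`'s iteration order; `src` is left empty, keeping its capacity
    // for reuse.
    dst.append(&mut src);

    assert!(src.is_empty());
    assert_eq!(
        dst.into_iter().collect::<Vec<_>>(),
        [("a", 1), ("b", 2), ("c", 3), ("d", 4)]
    );
}
```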
+ +## 2.2.6 (2024-03-22) + +- Added trait `MutableValues` for opt-in mutable access to set values. + +## 2.2.5 (2024-02-29) + +- Added optional `borsh` serialization support. + +## 2.2.4 (2024-02-28) + +- Added an `insert_sorted` method on `IndexMap`, `IndexSet`, and `VacantEntry`. +- Avoid hashing for lookups in single-entry maps. +- Limit preallocated memory in `serde` deserializers. + +## 2.2.3 (2024-02-11) + +- Added `move_index` and `swap_indices` methods to `IndexedEntry`, + `OccupiedEntry`, and `RawOccupiedEntryMut`, functioning like the existing + methods on `IndexMap`. +- Added `shift_insert` methods on `VacantEntry` and `RawVacantEntryMut`, as + well as `shift_insert_hashed_nocheck` on the latter, to insert the new entry + at a particular index. +- Added `shift_insert` methods on `IndexMap` and `IndexSet` to insert a new + entry at a particular index, or else move an existing entry there. + +## 2.2.2 (2024-01-31) + +- Added indexing methods to raw entries: `RawEntryBuilder::from_hash_full`, + `RawEntryBuilder::index_from_hash`, and `RawEntryMut::index`. + +## 2.2.1 (2024-01-28) + +- Corrected the signature of `RawOccupiedEntryMut::into_key(self) -> &'a mut K`, + This a breaking change from 2.2.0, but that version was published for less + than a day and has now been yanked. + +## 2.2.0 (2024-01-28) + +- The new `IndexMap::get_index_entry` method finds an entry by its index for + in-place manipulation. + +- The `Keys` iterator now implements `Index` for quick access to the + entry's key, compared to indexing the map to get the value. + +- The new `IndexMap::splice` and `IndexSet::splice` methods will drain the + given range as an iterator, and then replace that range with entries from + an input iterator. + +- The new trait `RawEntryApiV1` offers opt-in access to a raw entry API for + `IndexMap`, corresponding to the unstable API on `HashSet` as of Rust 1.75. + +- Many `IndexMap` and `IndexSet` methods have relaxed their type constraints, + e.g. removing `K: Hash` on methods that don't actually need to hash. + +- Removal methods `remove`, `remove_entry`, and `take` are now deprecated + in favor of their `shift_` or `swap_` prefixed variants, which are more + explicit about their effect on the index and order of remaining items. + The deprecated methods will remain to guide drop-in replacements from + `HashMap` and `HashSet` toward the prefixed methods. + +## 2.1.0 (2023-10-31) + +- Empty slices can now be created with `map::Slice::{new, new_mut}` and + `set::Slice::new`. In addition, `Slice::new`, `len`, and `is_empty` are + now `const` functions on both types. + +- `IndexMap`, `IndexSet`, and their respective `Slice`s all have binary + search methods for sorted data: map `binary_search_keys` and set + `binary_search` for plain comparison, `binary_search_by` for custom + comparators, `binary_search_by_key` for key extraction, and + `partition_point` for boolean conditions. + +## 2.0.2 (2023-09-29) + +- The `hashbrown` dependency has been updated to version 0.14.1 to + complete the support for Rust 1.63. + +## 2.0.1 (2023-09-27) + +- **MSRV**: Rust 1.63.0 is now supported as well, pending publication of + `hashbrown`'s relaxed MSRV (or use cargo `--ignore-rust-version`). + +## 2.0.0 (2023-06-23) + +- **MSRV**: Rust 1.64.0 or later is now required. + +- The `"std"` feature is no longer auto-detected. It is included in the + default feature set, or else can be enabled like any other Cargo feature. 
+ +- The `"serde-1"` feature has been removed, leaving just the optional + `"serde"` dependency to be enabled like a feature itself. + +- `IndexMap::get_index_mut` now returns `Option<(&K, &mut V)>`, changing + the key part from `&mut K` to `&K`. There is also a new alternative + `MutableKeys::get_index_mut2` to access the former behavior. + +- The new `map::Slice` and `set::Slice` offer a linear view of maps + and sets, behaving a lot like normal `[(K, V)]` and `[T]` slices. Notably, + comparison traits like `Eq` only consider items in order, rather than hash + lookups, and slices even implement `Hash`. + +- `IndexMap` and `IndexSet` now have `sort_by_cached_key` and + `par_sort_by_cached_key` methods which perform stable sorts in place + using a key extraction function. + +- `IndexMap` and `IndexSet` now have `reserve_exact`, `try_reserve`, and + `try_reserve_exact` methods that correspond to the same methods on `Vec`. + However, exactness only applies to the direct capacity for items, while the + raw hash table still follows its own rules for capacity and load factor. + +- The `Equivalent` trait is now re-exported from the `equivalent` crate, + intended as a common base to allow types to work with multiple map types. + +- The `hashbrown` dependency has been updated to version 0.14. + +- The `serde_seq` module has been moved from the crate root to below the + `map` module. + +## 1.9.3 (2023-03-24) + +- Bump the `rustc-rayon` dependency, for compiler use only. + +## 1.9.2 (2022-11-17) + +- `IndexMap` and `IndexSet` both implement `arbitrary::Arbitrary<'_>` and + `quickcheck::Arbitrary` if those optional dependency features are enabled. + +## 1.9.1 (2022-06-21) + +- The MSRV now allows Rust 1.56.0 as well. However, currently `hashbrown` + 0.12.1 requires 1.56.1, so users on 1.56.0 should downgrade that to 0.12.0 + until there is a later published version relaxing its requirement. + +## 1.9.0 (2022-06-16) + +- **MSRV**: Rust 1.56.1 or later is now required. + +- The `hashbrown` dependency has been updated to version 0.12. + +- `IterMut` and `ValuesMut` now implement `Debug`. + +- The new `IndexMap::shrink_to` and `IndexSet::shrink_to` methods shrink + the capacity with a lower bound. + +- The new `IndexMap::move_index` and `IndexSet::move_index` methods change + the position of an item from one index to another, shifting the items + between to accommodate the move. + +## 1.8.2 (2022-05-27) + +- Bump the `rustc-rayon` dependency, for compiler use only. + +## 1.8.1 (2022-03-29) + +- The new `IndexSet::replace_full` will return the index of the item along + with the replaced value, if any, by @zakcutner in PR [222]. + +[222]: https://github.com/indexmap-rs/indexmap/pull/222 + +## 1.8.0 (2022-01-07) + +- The new `IndexMap::into_keys` and `IndexMap::into_values` will consume + the map into keys or values, respectively, matching Rust 1.54's `HashMap` + methods, by @taiki-e in PR [195]. + +- More of the iterator types implement `Debug`, `ExactSizeIterator`, and + `FusedIterator`, by @cuviper in PR [196]. + +- `IndexMap` and `IndexSet` now implement rayon's `ParallelDrainRange`, + by @cuviper in PR [197]. + +- `IndexMap::with_hasher` and `IndexSet::with_hasher` are now `const` + functions, allowing static maps and sets, by @mwillsey in PR [203]. + +- `IndexMap` and `IndexSet` now implement `From` for arrays, matching + Rust 1.56's implementation for `HashMap`, by @rouge8 in PR [205]. 
+ +- `IndexMap` and `IndexSet` now have methods `sort_unstable_keys`, + `sort_unstable_by`, `sorted_unstable_by`, and `par_*` equivalents, + which sort in-place without preserving the order of equal items, by + @bhgomes in PR [211]. + +[195]: https://github.com/indexmap-rs/indexmap/pull/195 +[196]: https://github.com/indexmap-rs/indexmap/pull/196 +[197]: https://github.com/indexmap-rs/indexmap/pull/197 +[203]: https://github.com/indexmap-rs/indexmap/pull/203 +[205]: https://github.com/indexmap-rs/indexmap/pull/205 +[211]: https://github.com/indexmap-rs/indexmap/pull/211 + +## 1.7.0 (2021-06-29) + +- **MSRV**: Rust 1.49 or later is now required. + +- The `hashbrown` dependency has been updated to version 0.11. + +## 1.6.2 (2021-03-05) + +- Fixed to match `std` behavior, `OccupiedEntry::key` now references the + existing key in the map instead of the lookup key, by @cuviper in PR [170]. + +- The new `Entry::or_insert_with_key` matches Rust 1.50's `Entry` method, + passing `&K` to the callback to create a value, by @cuviper in PR [175]. + +[170]: https://github.com/indexmap-rs/indexmap/pull/170 +[175]: https://github.com/indexmap-rs/indexmap/pull/175 + +## 1.6.1 (2020-12-14) + +- The new `serde_seq` module implements `IndexMap` serialization as a + sequence to ensure order is preserved, by @cuviper in PR [158]. + +- New methods on maps and sets work like the `Vec`/slice methods by the same name: + `truncate`, `split_off`, `first`, `first_mut`, `last`, `last_mut`, and + `swap_indices`, by @cuviper in PR [160]. + +[158]: https://github.com/indexmap-rs/indexmap/pull/158 +[160]: https://github.com/indexmap-rs/indexmap/pull/160 + +## 1.6.0 (2020-09-05) + +- **MSRV**: Rust 1.36 or later is now required. + +- The `hashbrown` dependency has been updated to version 0.9. + +## 1.5.2 (2020-09-01) + +- The new "std" feature will force the use of `std` for users that explicitly + want the default `S = RandomState`, bypassing the autodetection added in 1.3.0, + by @cuviper in PR [145]. + +[145]: https://github.com/indexmap-rs/indexmap/pull/145 + +## 1.5.1 (2020-08-07) + +- Values can now be indexed by their `usize` position by @cuviper in PR [132]. + +- Some of the generic bounds have been relaxed to match `std` by @cuviper in PR [141]. + +- `drain` now accepts any `R: RangeBounds` by @cuviper in PR [142]. + +[132]: https://github.com/indexmap-rs/indexmap/pull/132 +[141]: https://github.com/indexmap-rs/indexmap/pull/141 +[142]: https://github.com/indexmap-rs/indexmap/pull/142 + +## 1.5.0 (2020-07-17) + +- **MSRV**: Rust 1.32 or later is now required. + +- The inner hash table is now based on `hashbrown` by @cuviper in PR [131]. + This also completes the method `reserve` and adds `shrink_to_fit`. + +- Add new methods `get_key_value`, `remove_entry`, `swap_remove_entry`, + and `shift_remove_entry`, by @cuviper in PR [136] + +- `Clone::clone_from` reuses allocations by @cuviper in PR [125] + +- Add new method `reverse` by @linclelinkpart5 in PR [128] + +[125]: https://github.com/indexmap-rs/indexmap/pull/125 +[128]: https://github.com/indexmap-rs/indexmap/pull/128 +[131]: https://github.com/indexmap-rs/indexmap/pull/131 +[136]: https://github.com/indexmap-rs/indexmap/pull/136 + +## 1.4.0 (2020-06-01) + +- Add new method `get_index_of` by @Thermatrix in PR [115] and [120] + +- Fix build script rebuild-if-changed configuration to use "build.rs"; + fixes issue [123]. Fix by @cuviper. + +- Dev-dependencies (rand and quickcheck) have been updated. 
The crate's tests + now run using Rust 1.32 or later (MSRV for building the crate has not changed). + by @kjeremy and @bluss + +[123]: https://github.com/indexmap-rs/indexmap/issues/123 +[115]: https://github.com/indexmap-rs/indexmap/pull/115 +[120]: https://github.com/indexmap-rs/indexmap/pull/120 + +## 1.3.2 (2020-02-05) + +- Maintenance update to regenerate the published `Cargo.toml`. + +## 1.3.1 (2020-01-15) + +- Maintenance update for formatting and `autocfg` 1.0. + +## 1.3.0 (2019-10-18) + +- The deprecation messages in the previous version have been removed. + (The methods have not otherwise changed.) Docs for removal methods have been + improved. +- From Rust 1.36, this crate supports being built **without std**, requiring + `alloc` instead. This is enabled automatically when it is detected that + `std` is not available. There is no crate feature to enable/disable to + trigger this. The new build-dep `autocfg` enables this. + +## 1.2.0 (2019-09-08) + +- Plain `.remove()` now has a deprecation message, it informs the user + about picking one of the removal functions `swap_remove` and `shift_remove` + which have different performance and order semantics. + Plain `.remove()` will not be removed, the warning message and method + will remain until further. + +- Add new method `shift_remove` for order preserving removal on the map, + and `shift_take` for the corresponding operation on the set. + +- Add methods `swap_remove`, `swap_remove_entry` to `Entry`. + +- Fix indexset/indexmap to support full paths, like `indexmap::indexmap!()` + +- Internal improvements: fix warnings, deprecations and style lints + +## 1.1.0 (2019-08-20) + +- Added optional feature `"rayon"` that adds parallel iterator support + to `IndexMap` and `IndexSet` using Rayon. This includes all the regular + iterators in parallel versions, and parallel sort. + +- Implemented `Clone` for `map::{Iter, Keys, Values}` and + `set::{Difference, Intersection, Iter, SymmetricDifference, Union}` + +- Implemented `Debug` for `map::{Entry, IntoIter, Iter, Keys, Values}` and + `set::{Difference, Intersection, IntoIter, Iter, SymmetricDifference, Union}` + +- Serde trait `IntoDeserializer` are implemented for `IndexMap` and `IndexSet`. + +- Minimum Rust version requirement increased to Rust 1.30 for development builds. + +## 1.0.2 (2018-10-22) + +- The new methods `IndexMap::insert_full` and `IndexSet::insert_full` are + both like `insert` with the index included in the return value. + +- The new method `Entry::and_modify` can be used to modify occupied + entries, matching the new methods of `std` maps in Rust 1.26. + +- The new method `Entry::or_default` inserts a default value in unoccupied + entries, matching the new methods of `std` maps in Rust 1.28. + +## 1.0.1 (2018-03-24) + +- Document Rust version policy for the crate (see rustdoc) + +## 1.0.0 (2018-03-11) + +- This is the 1.0 release for `indexmap`! (the crate and datastructure + formerly known as “ordermap”) +- `OccupiedEntry::insert` changed its signature, to use `&mut self` for + the method receiver, matching the equivalent method for a standard + `HashMap`. Thanks to @dtolnay for finding this bug. +- The deprecated old names from ordermap were removed: `OrderMap`, + `OrderSet`, `ordermap!{}`, `orderset!{}`. Use the new `IndexMap` + etc names instead. + +## 0.4.1 (2018-02-14) + +- Renamed crate to `indexmap`; the `ordermap` crate is now deprecated + and the types `OrderMap/Set` now have a deprecation notice. 
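A brief sketch of the difference between the two removal flavours referred to in the 1.2.0 notes above (illustrative, assuming a current `indexmap`):

```rust
use indexmap::IndexMap;

fn main() {
    let pairs = [("a", 1), ("b", 2), ("c", 3), ("d", 4)];

    // `swap_remove` is O(1): the last entry is swapped into the removed
    // slot, so the relative order after the removed key changes.
    let mut swapped = IndexMap::from(pairs);
    assert_eq!(swapped.swap_remove("b"), Some(2));
    assert_eq!(
        swapped.into_iter().collect::<Vec<_>>(),
        [("a", 1), ("d", 4), ("c", 3)]
    );

    // `shift_remove` is O(n): later entries shift down, preserving order.
    let mut shifted = IndexMap::from(pairs);
    assert_eq!(shifted.shift_remove("b"), Some(2));
    assert_eq!(
        shifted.into_iter().collect::<Vec<_>>(),
        [("a", 1), ("c", 3), ("d", 4)]
    );
}
```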
+ +## 0.4.0 (2018-02-02) + +- This is the last release series for this `ordermap` under that name, + because the crate is **going to be renamed** to `indexmap` (with types + `IndexMap`, `IndexSet`) and no change in functionality! +- The map and its associated structs moved into the `map` submodule of the + crate, so that the map and set are symmetric + + + The iterators, `Entry` and other structs are now under `ordermap::map::` + +- Internally refactored `OrderMap` so that all the main algorithms + (insertion, lookup, removal etc) that don't use the `S` parameter (the + hasher) are compiled without depending on `S`, which reduces generics bloat. + +- `Entry` no longer has a type parameter `S`, which is just like + the standard `HashMap`'s entry. + +- Minimum Rust version requirement increased to Rust 1.18 + +## 0.3.5 (2018-01-14) + +- Documentation improvements + +## 0.3.4 (2018-01-04) + +- The `.retain()` methods for `OrderMap` and `OrderSet` now + traverse the elements in order, and the retained elements **keep their order** +- Added new methods `.sort_by()`, `.sort_keys()` to `OrderMap` and + `.sort_by()`, `.sort()` to `OrderSet`. These methods allow you to + sort the maps in place efficiently. + +## 0.3.3 (2017-12-28) + +- Document insertion behaviour better by @lucab +- Updated dependences (no feature changes) by @ignatenkobrain + +## 0.3.2 (2017-11-25) + +- Add `OrderSet` by @cuviper! +- `OrderMap::drain` is now (too) a double ended iterator. + +## 0.3.1 (2017-11-19) + +- In all ordermap iterators, forward the `collect` method to the underlying + iterator as well. +- Add crates.io categories. + +## 0.3.0 (2017-10-07) + +- The methods `get_pair`, `get_pair_index` were both replaced by + `get_full` (and the same for the mutable case). +- Method `swap_remove_pair` replaced by `swap_remove_full`. +- Add trait `MutableKeys` for opt-in mutable key access. Mutable key access + is only possible through the methods of this extension trait. +- Add new trait `Equivalent` for key equivalence. This extends the + `Borrow` trait mechanism for `OrderMap::get` in a backwards compatible + way, just some minor type inference related issues may become apparent. + See [#10] for more information. +- Implement `Extend<(&K, &V)>` by @xfix. + +[#10]: https://github.com/indexmap-rs/indexmap/pull/10 + +## 0.2.13 (2017-09-30) + +- Fix deserialization to support custom hashers by @Techcable. +- Add methods `.index()` on the entry types by @garro95. + +## 0.2.12 (2017-09-11) + +- Add methods `.with_hasher()`, `.hasher()`. + +## 0.2.11 (2017-08-29) + +- Support `ExactSizeIterator` for the iterators. By @Binero. +- Use `Box<[Pos]>` internally, saving a word in the `OrderMap` struct. +- Serde support, with crate feature `"serde-1"`. By @xfix. + +## 0.2.10 (2017-04-29) + +- Add iterator `.drain(..)` by @stevej. + +## 0.2.9 (2017-03-26) + +- Add method `.is_empty()` by @overvenus. +- Implement `PartialEq, Eq` by @overvenus. +- Add method `.sorted_by()`. + +## 0.2.8 (2017-03-01) + +- Add iterators `.values()` and `.values_mut()`. +- Fix compatibility with 32-bit platforms. + +## 0.2.7 (2016-11-02) + +- Add `.retain()`. + +## 0.2.6 (2016-11-02) + +- Add `OccupiedEntry::remove_entry` and other minor entry methods, + so that it now has all the features of `HashMap`'s entries. + +## 0.2.5 (2016-10-31) + +- Improved `.pop()` slightly. + +## 0.2.4 (2016-10-22) + +- Improved performance of `.insert()` ([#3]) by @pczarn. 
+ +[#3]: https://github.com/indexmap-rs/indexmap/pull/3 + +## 0.2.3 (2016-10-11) + +- Generalize `Entry` for now, so that it works on hashmaps with non-default + hasher. However, there's a lingering compat issue since libstd `HashMap` + does not parameterize its entries by the hasher (`S` typarm). +- Special case some iterator methods like `.nth()`. + +## 0.2.2 (2016-10-02) + +- Disable the verbose `Debug` impl by default. + +## 0.2.1 (2016-10-02) + +- Fix doc links and clarify docs. + +## 0.2.0 (2016-10-01) + +- Add more `HashMap` methods & compat with its API. +- Experimental support for `.entry()` (the simplest parts of the API). +- Add `.reserve()` (placeholder impl). +- Add `.remove()` as synonym for `.swap_remove()`. +- Changed `.insert()` to swap value if the entry already exists, and + return `Option`. +- Experimental support as an *indexed* hash map! Added methods + `.get_index()`, `.get_index_mut()`, `.swap_remove_index()`, + `.get_pair_index()`, `.get_pair_index_mut()`. + +## 0.1.2 (2016-09-19) + +- Implement the 32/32 split idea for `Pos` which improves cache utilization + and lookup performance. + +## 0.1.1 (2016-09-16) + +- Initial release. diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/benches/bench.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/benches/bench.rs new file mode 100644 index 000000000000..a4e8e21bc2cb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/benches/bench.rs @@ -0,0 +1,763 @@ +#![feature(test)] + +extern crate test; +#[macro_use] +extern crate lazy_static; + +use fnv::FnvHasher; +use std::hash::BuildHasherDefault; +use std::hash::Hash; +type FnvBuilder = BuildHasherDefault; + +use test::black_box; +use test::Bencher; + +use indexmap::IndexMap; + +use std::collections::HashMap; + +use rand::rngs::SmallRng; +use rand::seq::SliceRandom; +use rand::SeedableRng; + +/// Use a consistently seeded Rng for benchmark stability +fn small_rng() -> SmallRng { + let seed = u64::from_le_bytes(*b"indexmap"); + SmallRng::seed_from_u64(seed) +} + +#[bench] +fn new_hashmap(b: &mut Bencher) { + b.iter(|| HashMap::::new()); +} + +#[bench] +fn new_indexmap(b: &mut Bencher) { + b.iter(|| IndexMap::::new()); +} + +#[bench] +fn with_capacity_10e5_hashmap(b: &mut Bencher) { + b.iter(|| HashMap::::with_capacity(10_000)); +} + +#[bench] +fn with_capacity_10e5_indexmap(b: &mut Bencher) { + b.iter(|| IndexMap::::with_capacity(10_000)); +} + +#[bench] +fn insert_hashmap_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn insert_indexmap_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_string_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x.to_string(), ()); + } + map + }); +} + +#[bench] +fn insert_indexmap_string_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x.to_string(), ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_str_10_000(b: &mut Bencher) { + let c = 10_000; + let ss = Vec::from_iter((0..c).map(|x| x.to_string())); + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for key in &ss { + map.insert(&key[..], ()); + } + map + }); +} + +#[bench] +fn 
insert_indexmap_str_10_000(b: &mut Bencher) { + let c = 10_000; + let ss = Vec::from_iter((0..c).map(|x| x.to_string())); + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for key in &ss { + map.insert(&key[..], ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) { + let c = 10_000; + let value = [0u64; 10]; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for i in 0..c { + map.insert(i, value); + } + map + }); +} + +#[bench] +fn insert_indexmap_int_bigvalue_10_000(b: &mut Bencher) { + let c = 10_000; + let value = [0u64; 10]; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for i in 0..c { + map.insert(i, value); + } + map + }); +} + +#[bench] +fn insert_hashmap_100_000(b: &mut Bencher) { + let c = 100_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn insert_indexmap_100_000(b: &mut Bencher) { + let c = 100_000; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_150(b: &mut Bencher) { + let c = 150; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn insert_indexmap_150(b: &mut Bencher) { + let c = 150; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x, ()); + } + map + }); +} + +#[bench] +fn entry_hashmap_150(b: &mut Bencher) { + let c = 150; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.entry(x).or_insert(()); + } + map + }); +} + +#[bench] +fn entry_indexmap_150(b: &mut Bencher) { + let c = 150; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.entry(x).or_insert(()); + } + map + }); +} + +#[bench] +fn iter_sum_hashmap_10_000(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let len = c - c / 10; + for x in 0..len { + map.insert(x, ()); + } + assert_eq!(map.len(), len); + b.iter(|| map.keys().sum::()); +} + +#[bench] +fn iter_sum_indexmap_10_000(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let len = c - c / 10; + for x in 0..len { + map.insert(x, ()); + } + assert_eq!(map.len(), len); + b.iter(|| map.keys().sum::()); +} + +#[bench] +fn iter_black_box_hashmap_10_000(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let len = c - c / 10; + for x in 0..len { + map.insert(x, ()); + } + assert_eq!(map.len(), len); + b.iter(|| { + for &key in map.keys() { + black_box(key); + } + }); +} + +#[bench] +fn iter_black_box_indexmap_10_000(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let len = c - c / 10; + for x in 0..len { + map.insert(x, ()); + } + assert_eq!(map.len(), len); + b.iter(|| { + for &key in map.keys() { + black_box(key); + } + }); +} + +fn shuffled_keys(iter: I) -> Vec +where + I: IntoIterator, +{ + let mut v = Vec::from_iter(iter); + let mut rng = small_rng(); + v.shuffle(&mut rng); + v +} + +#[bench] +fn lookup_hashmap_10_000_exist(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key, 1); + } + b.iter(|| { + let mut found = 0; + for key in 5000..c { + found += map.get(&key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_hashmap_10_000_noexist(b: &mut Bencher) { + let c = 10_000; + let mut map = 
HashMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key, 1); + } + b.iter(|| { + let mut found = 0; + for key in c..15000 { + found += map.get(&key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_indexmap_10_000_exist(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key, 1); + } + b.iter(|| { + let mut found = 0; + for key in 5000..c { + found += map.get(&key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_indexmap_10_000_noexist(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key, 1); + } + b.iter(|| { + let mut found = 0; + for key in c..15000 { + found += map.get(&key).is_some() as i32; + } + found + }); +} + +// number of items to look up +const LOOKUP_MAP_SIZE: u32 = 100_000_u32; +const LOOKUP_SAMPLE_SIZE: u32 = 5000; +const SORT_MAP_SIZE: usize = 10_000; + +// use lazy_static so that comparison benchmarks use the exact same inputs +lazy_static! { + static ref KEYS: Vec = shuffled_keys(0..LOOKUP_MAP_SIZE); +} + +lazy_static! { + static ref HMAP_100K: HashMap = { + let c = LOOKUP_MAP_SIZE; + let mut map = HashMap::with_capacity(c as usize); + let keys = &*KEYS; + for &key in keys { + map.insert(key, key); + } + map + }; +} + +lazy_static! { + static ref IMAP_100K: IndexMap = { + let c = LOOKUP_MAP_SIZE; + let mut map = IndexMap::with_capacity(c as usize); + let keys = &*KEYS; + for &key in keys { + map.insert(key, key); + } + map + }; +} + +lazy_static! { + static ref IMAP_SORT_U32: IndexMap = { + let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); + for &key in &KEYS[..SORT_MAP_SIZE] { + map.insert(key, key); + } + map + }; +} +lazy_static! 
{ + static ref IMAP_SORT_S: IndexMap = { + let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); + for &key in &KEYS[..SORT_MAP_SIZE] { + map.insert(format!("{:^16x}", &key), String::new()); + } + map + }; +} + +#[bench] +fn lookup_hashmap_100_000_multi(b: &mut Bencher) { + let map = &*HMAP_100K; + b.iter(|| { + let mut found = 0; + for key in 0..LOOKUP_SAMPLE_SIZE { + found += map.get(&key).is_some() as u32; + } + found + }); +} + +#[bench] +fn lookup_indexmap_100_000_multi(b: &mut Bencher) { + let map = &*IMAP_100K; + b.iter(|| { + let mut found = 0; + for key in 0..LOOKUP_SAMPLE_SIZE { + found += map.get(&key).is_some() as u32; + } + found + }); +} + +// inorder: Test looking up keys in the same order as they were inserted +#[bench] +fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) { + let map = &*HMAP_100K; + let keys = &*KEYS; + b.iter(|| { + let mut found = 0; + for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { + found += map.get(key).is_some() as u32; + } + found + }); +} + +#[bench] +fn lookup_indexmap_100_000_inorder_multi(b: &mut Bencher) { + let map = &*IMAP_100K; + let keys = &*KEYS; + b.iter(|| { + let mut found = 0; + for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { + found += map.get(key).is_some() as u32; + } + found + }); +} + +#[bench] +fn lookup_hashmap_100_000_single(b: &mut Bencher) { + let map = &*HMAP_100K; + let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); + b.iter(|| { + let key = iter.next().unwrap(); + map.get(&key).is_some() + }); +} + +#[bench] +fn lookup_indexmap_100_000_single(b: &mut Bencher) { + let map = &*IMAP_100K; + let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); + b.iter(|| { + let key = iter.next().unwrap(); + map.get(&key).is_some() + }); +} + +const GROW_SIZE: usize = 100_000; +type GrowKey = u32; + +// Test grow/resize without preallocation +#[bench] +fn grow_fnv_hashmap_100_000(b: &mut Bencher) { + b.iter(|| { + let mut map: HashMap<_, _, FnvBuilder> = HashMap::default(); + for x in 0..GROW_SIZE { + map.insert(x as GrowKey, x as GrowKey); + } + map + }); +} + +#[bench] +fn grow_fnv_indexmap_100_000(b: &mut Bencher) { + b.iter(|| { + let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default(); + for x in 0..GROW_SIZE { + map.insert(x as GrowKey, x as GrowKey); + } + map + }); +} + +const MERGE: u64 = 10_000; +#[bench] +fn hashmap_merge_simple(b: &mut Bencher) { + let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); + let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); + b.iter(|| { + let mut merged = first_map.clone(); + merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); + merged + }); +} + +#[bench] +fn hashmap_merge_shuffle(b: &mut Bencher) { + let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); + let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); + let mut v = Vec::new(); + let mut rng = small_rng(); + b.iter(|| { + let mut merged = first_map.clone(); + v.extend(second_map.iter().map(|(&k, &v)| (k, v))); + v.shuffle(&mut rng); + merged.extend(v.drain(..)); + + merged + }); +} + +#[bench] +fn indexmap_merge_simple(b: &mut Bencher) { + let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); + let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); + b.iter(|| { + let mut merged = first_map.clone(); + merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); + merged + }); +} + +#[bench] +fn indexmap_merge_shuffle(b: &mut Bencher) { + let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); + let 
second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); + let mut v = Vec::new(); + let mut rng = small_rng(); + b.iter(|| { + let mut merged = first_map.clone(); + v.extend(second_map.iter().map(|(&k, &v)| (k, v))); + v.shuffle(&mut rng); + merged.extend(v.drain(..)); + + merged + }); +} + +#[bench] +fn swap_remove_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); + let mut keys = Vec::from_iter(map.keys().copied()); + let mut rng = small_rng(); + keys.shuffle(&mut rng); + + b.iter(|| { + let mut map = map.clone(); + for key in &keys { + map.swap_remove(key); + } + assert_eq!(map.len(), 0); + map + }); +} + +#[bench] +fn shift_remove_indexmap_100_000_few(b: &mut Bencher) { + let map = IMAP_100K.clone(); + let mut keys = Vec::from_iter(map.keys().copied()); + let mut rng = small_rng(); + keys.shuffle(&mut rng); + keys.truncate(50); + + b.iter(|| { + let mut map = map.clone(); + for key in &keys { + map.shift_remove(key); + } + assert_eq!(map.len(), IMAP_100K.len() - keys.len()); + map + }); +} + +#[bench] +fn shift_remove_indexmap_2_000_full(b: &mut Bencher) { + let mut keys = KEYS[..2_000].to_vec(); + let mut map = IndexMap::with_capacity(keys.len()); + for &key in &keys { + map.insert(key, key); + } + let mut rng = small_rng(); + keys.shuffle(&mut rng); + + b.iter(|| { + let mut map = map.clone(); + for key in &keys { + map.shift_remove(key); + } + assert_eq!(map.len(), 0); + map + }); +} + +#[bench] +fn pop_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + while !map.is_empty() { + map.pop(); + } + assert_eq!(map.len(), 0); + map + }); +} + +#[bench] +fn few_retain_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 7 == 0); + map + }); +} + +#[bench] +fn few_retain_hashmap_100_000(b: &mut Bencher) { + let map = HMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 7 == 0); + map + }); +} + +#[bench] +fn half_retain_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 2 == 0); + map + }); +} + +#[bench] +fn half_retain_hashmap_100_000(b: &mut Bencher) { + let map = HMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 2 == 0); + map + }); +} + +#[bench] +fn many_retain_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 100 != 0); + map + }); +} + +#[bench] +fn many_retain_hashmap_100_000(b: &mut Bencher) { + let map = HMAP_100K.clone(); + + b.iter(|| { + let mut map = map.clone(); + map.retain(|k, _| *k % 100 != 0); + map + }); +} + +// simple sort impl for comparison +pub fn simple_sort(m: &mut IndexMap) { + let mut ordered: Vec<_> = m.drain(..).collect(); + ordered.sort_by(|left, right| left.0.cmp(&right.0)); + m.extend(ordered); +} + +#[bench] +fn indexmap_sort_s(b: &mut Bencher) { + let map = IMAP_SORT_S.clone(); + + // there's a map clone there, but it's still useful to profile this + b.iter(|| { + let mut map = map.clone(); + map.sort_keys(); + map + }); +} + +#[bench] +fn indexmap_simple_sort_s(b: &mut Bencher) { + let map = IMAP_SORT_S.clone(); + + // there's a map clone there, but it's still useful to profile this + b.iter(|| { + let mut map = map.clone(); + simple_sort(&mut map); + map + }); +} + +#[bench] +fn indexmap_sort_u32(b: &mut Bencher) { + let map = 
IMAP_SORT_U32.clone(); + + // there's a map clone there, but it's still useful to profile this + b.iter(|| { + let mut map = map.clone(); + map.sort_keys(); + map + }); +} + +#[bench] +fn indexmap_simple_sort_u32(b: &mut Bencher) { + let map = IMAP_SORT_U32.clone(); + + // there's a map clone there, but it's still useful to profile this + b.iter(|| { + let mut map = map.clone(); + simple_sort(&mut map); + map + }); +} + +// measure the fixed overhead of cloning in sort benchmarks +#[bench] +fn indexmap_clone_for_sort_s(b: &mut Bencher) { + let map = IMAP_SORT_S.clone(); + + b.iter(|| map.clone()); +} + +#[bench] +fn indexmap_clone_for_sort_u32(b: &mut Bencher) { + let map = IMAP_SORT_U32.clone(); + + b.iter(|| map.clone()); +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/benches/faststring.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/benches/faststring.rs new file mode 100644 index 000000000000..ecc28b408b04 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/benches/faststring.rs @@ -0,0 +1,185 @@ +#![feature(test)] + +extern crate test; + +use test::Bencher; + +use indexmap::IndexMap; + +use std::collections::HashMap; + +use rand::rngs::SmallRng; +use rand::seq::SliceRandom; +use rand::SeedableRng; + +use std::hash::{Hash, Hasher}; + +use std::borrow::Borrow; +use std::ops::Deref; + +/// Use a consistently seeded Rng for benchmark stability +fn small_rng() -> SmallRng { + let seed = u64::from_le_bytes(*b"indexmap"); + SmallRng::seed_from_u64(seed) +} + +#[derive(PartialEq, Eq, Copy, Clone)] +#[repr(transparent)] +pub struct OneShot(pub T); + +impl Hash for OneShot { + fn hash(&self, h: &mut H) { + h.write(self.0.as_bytes()) + } +} + +impl<'a, S> From<&'a S> for &'a OneShot +where + S: AsRef, +{ + fn from(s: &'a S) -> Self { + let s: &str = s.as_ref(); + unsafe { &*(s as *const str as *const OneShot) } + } +} + +impl Hash for OneShot { + fn hash(&self, h: &mut H) { + h.write(self.0.as_bytes()) + } +} + +impl Borrow> for OneShot { + fn borrow(&self) -> &OneShot { + <&OneShot>::from(&self.0) + } +} + +impl Deref for OneShot { + type Target = T; + fn deref(&self) -> &T { + &self.0 + } +} + +fn shuffled_keys(iter: I) -> Vec +where + I: IntoIterator, +{ + let mut v = Vec::from_iter(iter); + let mut rng = small_rng(); + v.shuffle(&mut rng); + v +} + +#[bench] +fn insert_hashmap_string_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(x.to_string(), ()); + } + map + }); +} + +#[bench] +fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = HashMap::with_capacity(c); + for x in 0..c { + map.insert(OneShot(x.to_string()), ()); + } + map + }); +} + +#[bench] +fn insert_indexmap_string_10_000(b: &mut Bencher) { + let c = 10_000; + b.iter(|| { + let mut map = IndexMap::with_capacity(c); + for x in 0..c { + map.insert(x.to_string(), ()); + } + map + }); +} + +#[bench] +fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key.to_string(), 1); + } + let lookups = (5000..c).map(|x| x.to_string()).collect::>(); + b.iter(|| { + let mut found = 0; + for key in &lookups { + found += map.get(key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) { + let c = 10_000; + let mut map = HashMap::with_capacity(c); + let 
keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(OneShot(key.to_string()), 1); + } + let lookups = (5000..c) + .map(|x| OneShot(x.to_string())) + .collect::>(); + b.iter(|| { + let mut found = 0; + for key in &lookups { + found += map.get(key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_indexmap_10_000_exist_string(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(key.to_string(), 1); + } + let lookups = (5000..c).map(|x| x.to_string()).collect::>(); + b.iter(|| { + let mut found = 0; + for key in &lookups { + found += map.get(key).is_some() as i32; + } + found + }); +} + +#[bench] +fn lookup_indexmap_10_000_exist_string_oneshot(b: &mut Bencher) { + let c = 10_000; + let mut map = IndexMap::with_capacity(c); + let keys = shuffled_keys(0..c); + for &key in &keys { + map.insert(OneShot(key.to_string()), 1); + } + let lookups = (5000..c) + .map(|x| OneShot(x.to_string())) + .collect::>(); + b.iter(|| { + let mut found = 0; + for key in &lookups { + found += map.get(key).is_some() as i32; + } + found + }); +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/arbitrary.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/arbitrary.rs new file mode 100644 index 000000000000..7798438c1567 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/arbitrary.rs @@ -0,0 +1,77 @@ +#[cfg(feature = "arbitrary")] +#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))] +mod impl_arbitrary { + use crate::{IndexMap, IndexSet}; + use arbitrary::{Arbitrary, Result, Unstructured}; + use core::hash::{BuildHasher, Hash}; + + impl<'a, K, V, S> Arbitrary<'a> for IndexMap + where + K: Arbitrary<'a> + Hash + Eq, + V: Arbitrary<'a>, + S: BuildHasher + Default, + { + fn arbitrary(u: &mut Unstructured<'a>) -> Result { + u.arbitrary_iter()?.collect() + } + + fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { + u.arbitrary_take_rest_iter()?.collect() + } + } + + impl<'a, T, S> Arbitrary<'a> for IndexSet + where + T: Arbitrary<'a> + Hash + Eq, + S: BuildHasher + Default, + { + fn arbitrary(u: &mut Unstructured<'a>) -> Result { + u.arbitrary_iter()?.collect() + } + + fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { + u.arbitrary_take_rest_iter()?.collect() + } + } +} + +#[cfg(feature = "quickcheck")] +#[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))] +mod impl_quickcheck { + use crate::{IndexMap, IndexSet}; + use alloc::boxed::Box; + use alloc::vec::Vec; + use core::hash::{BuildHasher, Hash}; + use quickcheck::{Arbitrary, Gen}; + + impl Arbitrary for IndexMap + where + K: Arbitrary + Hash + Eq, + V: Arbitrary, + S: BuildHasher + Default + Clone + 'static, + { + fn arbitrary(g: &mut Gen) -> Self { + Self::from_iter(Vec::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + let vec = Vec::from_iter(self.clone()); + Box::new(vec.shrink().map(Self::from_iter)) + } + } + + impl Arbitrary for IndexSet + where + T: Arbitrary + Hash + Eq, + S: BuildHasher + Default + Clone + 'static, + { + fn arbitrary(g: &mut Gen) -> Self { + Self::from_iter(Vec::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + let vec = Vec::from_iter(self.clone()); + Box::new(vec.shrink().map(Self::from_iter)) + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/borsh.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/borsh.rs new file mode 100644 index 000000000000..c485bd522202 --- /dev/null +++ 
b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/borsh.rs @@ -0,0 +1,122 @@ +#![cfg_attr(docsrs, doc(cfg(feature = "borsh")))] + +use alloc::vec::Vec; +use core::hash::BuildHasher; +use core::hash::Hash; +use core::mem::size_of; + +use borsh::error::ERROR_ZST_FORBIDDEN; +use borsh::io::{Error, ErrorKind, Read, Result, Write}; +use borsh::{BorshDeserialize, BorshSerialize}; + +use crate::map::IndexMap; +use crate::set::IndexSet; + +impl BorshSerialize for IndexMap +where + K: BorshSerialize, + V: BorshSerialize, +{ + #[inline] + fn serialize(&self, writer: &mut W) -> Result<()> { + check_zst::()?; + + let iterator = self.iter(); + + u32::try_from(iterator.len()) + .map_err(|_| ErrorKind::InvalidData)? + .serialize(writer)?; + + for (key, value) in iterator { + key.serialize(writer)?; + value.serialize(writer)?; + } + + Ok(()) + } +} + +impl BorshDeserialize for IndexMap +where + K: BorshDeserialize + Eq + Hash, + V: BorshDeserialize, + S: BuildHasher + Default, +{ + #[inline] + fn deserialize_reader(reader: &mut R) -> Result { + check_zst::()?; + let vec = >::deserialize_reader(reader)?; + Ok(vec.into_iter().collect::>()) + } +} + +impl BorshSerialize for IndexSet +where + T: BorshSerialize, +{ + #[inline] + fn serialize(&self, writer: &mut W) -> Result<()> { + check_zst::()?; + + let iterator = self.iter(); + + u32::try_from(iterator.len()) + .map_err(|_| ErrorKind::InvalidData)? + .serialize(writer)?; + + for item in iterator { + item.serialize(writer)?; + } + + Ok(()) + } +} + +impl BorshDeserialize for IndexSet +where + T: BorshDeserialize + Eq + Hash, + S: BuildHasher + Default, +{ + #[inline] + fn deserialize_reader(reader: &mut R) -> Result { + check_zst::()?; + let vec = >::deserialize_reader(reader)?; + Ok(vec.into_iter().collect::>()) + } +} + +fn check_zst() -> Result<()> { + if size_of::() == 0 { + return Err(Error::new(ErrorKind::InvalidData, ERROR_ZST_FORBIDDEN)); + } + Ok(()) +} + +#[cfg(test)] +mod borsh_tests { + use super::*; + + #[test] + fn map_borsh_roundtrip() { + let original_map: IndexMap = { + let mut map = IndexMap::new(); + map.insert(1, 2); + map.insert(3, 4); + map.insert(5, 6); + map + }; + let serialized_map = borsh::to_vec(&original_map).unwrap(); + let deserialized_map: IndexMap = + BorshDeserialize::try_from_slice(&serialized_map).unwrap(); + assert_eq!(original_map, deserialized_map); + } + + #[test] + fn set_borsh_roundtrip() { + let original_map: IndexSet = [1, 2, 3, 4, 5, 6].into_iter().collect(); + let serialized_map = borsh::to_vec(&original_map).unwrap(); + let deserialized_map: IndexSet = + BorshDeserialize::try_from_slice(&serialized_map).unwrap(); + assert_eq!(original_map, deserialized_map); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/lib.rs new file mode 100644 index 000000000000..3e16bc6e191a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/lib.rs @@ -0,0 +1,274 @@ +// We *mostly* avoid unsafe code, but `Slice` allows it for DST casting. +#![deny(unsafe_code)] +#![warn(rust_2018_idioms)] +#![no_std] + +//! [`IndexMap`] is a hash table where the iteration order of the key-value +//! pairs is independent of the hash values of the keys. +//! +//! [`IndexSet`] is a corresponding hash set using the same implementation and +//! with similar properties. +//! +//! ### Highlights +//! +//! [`IndexMap`] and [`IndexSet`] are drop-in compatible with the std `HashMap` +//! 
and `HashSet`, but they also have some features of note: +//! +//! - The ordering semantics (see their documentation for details) +//! - Sorting methods and the [`.pop()`][IndexMap::pop] methods. +//! - The [`Equivalent`] trait, which offers more flexible equality definitions +//! between borrowed and owned versions of keys. +//! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable +//! access to map keys, and [`MutableValues`][set::MutableValues] for sets. +//! +//! ### Feature Flags +//! +//! To reduce the amount of compiled code in the crate by default, certain +//! features are gated behind [feature flags]. These allow you to opt in to (or +//! out of) functionality. Below is a list of the features available in this +//! crate. +//! +//! * `std`: Enables features which require the Rust standard library. For more +//! information see the section on [`no_std`]. +//! * `rayon`: Enables parallel iteration and other parallel methods. +//! * `serde`: Adds implementations for [`Serialize`] and [`Deserialize`] +//! to [`IndexMap`] and [`IndexSet`]. Alternative implementations for +//! (de)serializing [`IndexMap`] as an ordered sequence are available in the +//! [`map::serde_seq`] module. +//! * `borsh`: Adds implementations for [`BorshSerialize`] and [`BorshDeserialize`] +//! to [`IndexMap`] and [`IndexSet`]. +//! * `arbitrary`: Adds implementations for the [`arbitrary::Arbitrary`] trait +//! to [`IndexMap`] and [`IndexSet`]. +//! * `quickcheck`: Adds implementations for the [`quickcheck::Arbitrary`] trait +//! to [`IndexMap`] and [`IndexSet`]. +//! +//! _Note: only the `std` feature is enabled by default._ +//! +//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section +//! [`no_std`]: #no-standard-library-targets +//! [`Serialize`]: `::serde::Serialize` +//! [`Deserialize`]: `::serde::Deserialize` +//! [`BorshSerialize`]: `::borsh::BorshSerialize` +//! [`BorshDeserialize`]: `::borsh::BorshDeserialize` +//! [`arbitrary::Arbitrary`]: `::arbitrary::Arbitrary` +//! [`quickcheck::Arbitrary`]: `::quickcheck::Arbitrary` +//! +//! ### Alternate Hashers +//! +//! [`IndexMap`] and [`IndexSet`] have a default hasher type +//! [`S = RandomState`][std::collections::hash_map::RandomState], +//! just like the standard `HashMap` and `HashSet`, which is resistant to +//! HashDoS attacks but not the most performant. Type aliases can make it easier +//! to use alternate hashers: +//! +//! ``` +//! use fnv::FnvBuildHasher; +//! use fxhash::FxBuildHasher; +//! use indexmap::{IndexMap, IndexSet}; +//! +//! type FnvIndexMap = IndexMap; +//! type FnvIndexSet = IndexSet; +//! +//! type FxIndexMap = IndexMap; +//! type FxIndexSet = IndexSet; +//! +//! let std: IndexSet = (0..100).collect(); +//! let fnv: FnvIndexSet = (0..100).collect(); +//! let fx: FxIndexSet = (0..100).collect(); +//! assert_eq!(std, fnv); +//! assert_eq!(std, fx); +//! ``` +//! +//! ### Rust Version +//! +//! This version of indexmap requires Rust 1.63 or later. +//! +//! The indexmap 2.x release series will use a carefully considered version +//! upgrade policy, where in a later 2.x version, we will raise the minimum +//! required Rust version. +//! +//! ## No Standard Library Targets +//! +//! This crate supports being built without `std`, requiring `alloc` instead. +//! This is chosen by disabling the default "std" cargo feature, by adding +//! `default-features = false` to your dependency specification. +//! +//! - Creating maps and sets using [`new`][IndexMap::new] and +//! 
[`with_capacity`][IndexMap::with_capacity] is unavailable without `std`. +//! Use methods [`IndexMap::default`], [`with_hasher`][IndexMap::with_hasher], +//! [`with_capacity_and_hasher`][IndexMap::with_capacity_and_hasher] instead. +//! A no-std compatible hasher will be needed as well, for example +//! from the crate `twox-hash`. +//! - Macros [`indexmap!`] and [`indexset!`] are unavailable without `std`. + +#![cfg_attr(docsrs, feature(doc_cfg))] + +extern crate alloc; + +#[cfg(feature = "std")] +#[macro_use] +extern crate std; + +use alloc::vec::{self, Vec}; + +mod arbitrary; +#[macro_use] +mod macros; +#[cfg(feature = "borsh")] +mod borsh; +#[cfg(feature = "serde")] +mod serde; +mod util; + +pub mod map; +pub mod set; + +// Placed after `map` and `set` so new `rayon` methods on the types +// are documented after the "normal" methods. +#[cfg(feature = "rayon")] +mod rayon; + +#[cfg(feature = "rustc-rayon")] +mod rustc; + +pub use crate::map::IndexMap; +pub use crate::set::IndexSet; +pub use equivalent::Equivalent; + +// shared private items + +/// Hash value newtype. Not larger than usize, since anything larger +/// isn't used for selecting position anyway. +#[derive(Clone, Copy, Debug, PartialEq)] +struct HashValue(usize); + +impl HashValue { + #[inline(always)] + fn get(self) -> u64 { + self.0 as u64 + } +} + +#[derive(Copy, Debug)] +struct Bucket { + hash: HashValue, + key: K, + value: V, +} + +impl Clone for Bucket +where + K: Clone, + V: Clone, +{ + fn clone(&self) -> Self { + Bucket { + hash: self.hash, + key: self.key.clone(), + value: self.value.clone(), + } + } + + fn clone_from(&mut self, other: &Self) { + self.hash = other.hash; + self.key.clone_from(&other.key); + self.value.clone_from(&other.value); + } +} + +impl Bucket { + // field accessors -- used for `f` instead of closures in `.map(f)` + fn key_ref(&self) -> &K { + &self.key + } + fn value_ref(&self) -> &V { + &self.value + } + fn value_mut(&mut self) -> &mut V { + &mut self.value + } + fn key(self) -> K { + self.key + } + fn value(self) -> V { + self.value + } + fn key_value(self) -> (K, V) { + (self.key, self.value) + } + fn refs(&self) -> (&K, &V) { + (&self.key, &self.value) + } + fn ref_mut(&mut self) -> (&K, &mut V) { + (&self.key, &mut self.value) + } + fn muts(&mut self) -> (&mut K, &mut V) { + (&mut self.key, &mut self.value) + } +} + +trait Entries { + type Entry; + fn into_entries(self) -> Vec; + fn as_entries(&self) -> &[Self::Entry]; + fn as_entries_mut(&mut self) -> &mut [Self::Entry]; + fn with_entries(&mut self, f: F) + where + F: FnOnce(&mut [Self::Entry]); +} + +/// The error type for [`try_reserve`][IndexMap::try_reserve] methods. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct TryReserveError { + kind: TryReserveErrorKind, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +enum TryReserveErrorKind { + // The standard library's kind is currently opaque to us, otherwise we could unify this. + Std(alloc::collections::TryReserveError), + CapacityOverflow, + AllocError { layout: alloc::alloc::Layout }, +} + +// These are not `From` so we don't expose them in our public API. 
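The `TryReserveError` plumbing above wraps allocation failures from both the entry vector and the hashbrown table; as a quick orientation, a minimal caller-side sketch of the fallible-reservation API it backs (assuming the default `std` feature, illustrative only):

```rust
use indexmap::IndexMap;

// Grow a map without aborting on allocation failure: try_reserve surfaces
// the error, and the Display impl above renders a readable reason.
fn grow_checked(map: &mut IndexMap<u64, u64>, extra: usize) -> Result<(), String> {
    map.try_reserve(extra).map_err(|e| e.to_string())
}

fn main() {
    let mut map = IndexMap::new();
    assert!(grow_checked(&mut map, 1_024).is_ok());
    // After a successful reservation the capacity covers the request.
    assert!(map.capacity() >= 1_024);
}
```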
+impl TryReserveError { + fn from_alloc(error: alloc::collections::TryReserveError) -> Self { + Self { + kind: TryReserveErrorKind::Std(error), + } + } + + fn from_hashbrown(error: hashbrown::TryReserveError) -> Self { + Self { + kind: match error { + hashbrown::TryReserveError::CapacityOverflow => { + TryReserveErrorKind::CapacityOverflow + } + hashbrown::TryReserveError::AllocError { layout } => { + TryReserveErrorKind::AllocError { layout } + } + }, + } + } +} + +impl core::fmt::Display for TryReserveError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let reason = match &self.kind { + TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f), + TryReserveErrorKind::CapacityOverflow => { + " because the computed capacity exceeded the collection's maximum" + } + TryReserveErrorKind::AllocError { .. } => { + " because the memory allocator returned an error" + } + }; + f.write_str("memory allocation failed")?; + f.write_str(reason) + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl std::error::Error for TryReserveError {} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/macros.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/macros.rs new file mode 100644 index 000000000000..b347de22dd57 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/macros.rs @@ -0,0 +1,178 @@ +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[macro_export] +/// Create an [`IndexMap`][crate::IndexMap] from a list of key-value pairs +/// +/// ## Example +/// +/// ``` +/// use indexmap::indexmap; +/// +/// let map = indexmap!{ +/// "a" => 1, +/// "b" => 2, +/// }; +/// assert_eq!(map["a"], 1); +/// assert_eq!(map["b"], 2); +/// assert_eq!(map.get("c"), None); +/// +/// // "a" is the first key +/// assert_eq!(map.keys().next(), Some(&"a")); +/// ``` +macro_rules! indexmap { + ($($key:expr => $value:expr,)+) => { $crate::indexmap!($($key => $value),+) }; + ($($key:expr => $value:expr),*) => { + { + // Note: `stringify!($key)` is just here to consume the repetition, + // but we throw away that string literal during constant evaluation. + const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]); + let mut map = $crate::IndexMap::with_capacity(CAP); + $( + map.insert($key, $value); + )* + map + } + }; +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +#[macro_export] +/// Create an [`IndexSet`][crate::IndexSet] from a list of values +/// +/// ## Example +/// +/// ``` +/// use indexmap::indexset; +/// +/// let set = indexset!{ +/// "a", +/// "b", +/// }; +/// assert!(set.contains("a")); +/// assert!(set.contains("b")); +/// assert!(!set.contains("c")); +/// +/// // "a" is the first value +/// assert_eq!(set.iter().next(), Some(&"a")); +/// ``` +macro_rules! indexset { + ($($value:expr,)+) => { $crate::indexset!($($value),+) }; + ($($value:expr),*) => { + { + // Note: `stringify!($value)` is just here to consume the repetition, + // but we throw away that string literal during constant evaluation. + const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]); + let mut set = $crate::IndexSet::with_capacity(CAP); + $( + set.insert($value); + )* + set + } + }; +} + +// generate all the Iterator methods by just forwarding to the underlying +// self.iter and mapping its element. +macro_rules! 
iterator_methods { + // $map_elt is the mapping function from the underlying iterator's element + // same mapping function for both options and iterators + ($map_elt:expr) => { + fn next(&mut self) -> Option { + self.iter.next().map($map_elt) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn count(self) -> usize { + self.iter.len() + } + + fn nth(&mut self, n: usize) -> Option { + self.iter.nth(n).map($map_elt) + } + + fn last(mut self) -> Option { + self.next_back() + } + + fn collect(self) -> C + where + C: FromIterator, + { + // NB: forwarding this directly to standard iterators will + // allow it to leverage unstable traits like `TrustedLen`. + self.iter.map($map_elt).collect() + } + }; +} + +macro_rules! double_ended_iterator_methods { + // $map_elt is the mapping function from the underlying iterator's element + // same mapping function for both options and iterators + ($map_elt:expr) => { + fn next_back(&mut self) -> Option { + self.iter.next_back().map($map_elt) + } + + fn nth_back(&mut self, n: usize) -> Option { + self.iter.nth_back(n).map($map_elt) + } + }; +} + +// generate `ParallelIterator` methods by just forwarding to the underlying +// self.entries and mapping its elements. +#[cfg(any(feature = "rayon", feature = "rustc-rayon"))] +macro_rules! parallel_iterator_methods { + // $map_elt is the mapping function from the underlying iterator's element + ($map_elt:expr) => { + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.entries + .into_par_iter() + .map($map_elt) + .drive_unindexed(consumer) + } + + // NB: This allows indexed collection, e.g. directly into a `Vec`, but the + // underlying iterator must really be indexed. We should remove this if we + // start having tombstones that must be filtered out. + fn opt_len(&self) -> Option { + Some(self.entries.len()) + } + }; +} + +// generate `IndexedParallelIterator` methods by just forwarding to the underlying +// self.entries and mapping its elements. +#[cfg(any(feature = "rayon", feature = "rustc-rayon"))] +macro_rules! indexed_parallel_iterator_methods { + // $map_elt is the mapping function from the underlying iterator's element + ($map_elt:expr) => { + fn drive(self, consumer: C) -> C::Result + where + C: Consumer, + { + self.entries.into_par_iter().map($map_elt).drive(consumer) + } + + fn len(&self) -> usize { + self.entries.len() + } + + fn with_producer(self, callback: CB) -> CB::Output + where + CB: ProducerCallback, + { + self.entries + .into_par_iter() + .map($map_elt) + .with_producer(callback) + } + }; +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map.rs new file mode 100644 index 000000000000..946cb6fc316a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map.rs @@ -0,0 +1,1578 @@ +//! [`IndexMap`] is a hash table where the iteration order of the key-value +//! pairs is independent of the hash values of the keys. 
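As a quick illustration of the ordering guarantee stated in this module doc (insertion order is preserved, and entries carry compact indices), a minimal sketch using only the public API defined below:

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new();
    map.insert("zebra", 1);
    map.insert("apple", 2);
    map.insert("mango", 3);

    // Iteration order is the insertion order, not hash or sorted order.
    assert!(map.keys().copied().eq(["zebra", "apple", "mango"]));

    // Every entry also has a stable index in 0..len().
    assert_eq!(map.get_index_of("apple"), Some(1));
    assert_eq!(map.get_index(2), Some((&"mango", &3)));
}
```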
+ +mod core; +mod iter; +mod mutable; +mod slice; + +#[cfg(feature = "serde")] +#[cfg_attr(docsrs, doc(cfg(feature = "serde")))] +pub mod serde_seq; + +#[cfg(test)] +mod tests; + +pub use self::core::raw_entry_v1::{self, RawEntryApiV1}; +pub use self::core::{Entry, IndexedEntry, OccupiedEntry, VacantEntry}; +pub use self::iter::{ + Drain, IntoIter, IntoKeys, IntoValues, Iter, IterMut, IterMut2, Keys, Splice, Values, ValuesMut, +}; +pub use self::mutable::MutableEntryKey; +pub use self::mutable::MutableKeys; +pub use self::slice::Slice; + +#[cfg(feature = "rayon")] +pub use crate::rayon::map as rayon; + +use ::core::cmp::Ordering; +use ::core::fmt; +use ::core::hash::{BuildHasher, Hash, Hasher}; +use ::core::mem; +use ::core::ops::{Index, IndexMut, RangeBounds}; +use alloc::boxed::Box; +use alloc::vec::Vec; + +#[cfg(feature = "std")] +use std::collections::hash_map::RandomState; + +use self::core::IndexMapCore; +use crate::util::{third, try_simplify_range}; +use crate::{Bucket, Entries, Equivalent, HashValue, TryReserveError}; + +/// A hash table where the iteration order of the key-value pairs is independent +/// of the hash values of the keys. +/// +/// The interface is closely compatible with the standard +/// [`HashMap`][std::collections::HashMap], +/// but also has additional features. +/// +/// # Order +/// +/// The key-value pairs have a consistent order that is determined by +/// the sequence of insertion and removal calls on the map. The order does +/// not depend on the keys or the hash function at all. +/// +/// All iterators traverse the map in *the order*. +/// +/// The insertion order is preserved, with **notable exceptions** like the +/// [`.remove()`][Self::remove] or [`.swap_remove()`][Self::swap_remove] methods. +/// Methods such as [`.sort_by()`][Self::sort_by] of +/// course result in a new order, depending on the sorting order. +/// +/// # Indices +/// +/// The key-value pairs are indexed in a compact range without holes in the +/// range `0..self.len()`. For example, the method `.get_full` looks up the +/// index for a key, and the method `.get_index` looks up the key-value pair by +/// index. +/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexMap; +/// +/// // count the frequency of each letter in a sentence. 
+/// let mut letters = IndexMap::new(); +/// for ch in "a short treatise on fungi".chars() { +/// *letters.entry(ch).or_insert(0) += 1; +/// } +/// +/// assert_eq!(letters[&'s'], 2); +/// assert_eq!(letters[&'t'], 3); +/// assert_eq!(letters[&'u'], 1); +/// assert_eq!(letters.get(&'y'), None); +/// ``` +#[cfg(feature = "std")] +pub struct IndexMap { + pub(crate) core: IndexMapCore, + hash_builder: S, +} +#[cfg(not(feature = "std"))] +pub struct IndexMap { + pub(crate) core: IndexMapCore, + hash_builder: S, +} + +impl Clone for IndexMap +where + K: Clone, + V: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + IndexMap { + core: self.core.clone(), + hash_builder: self.hash_builder.clone(), + } + } + + fn clone_from(&mut self, other: &Self) { + self.core.clone_from(&other.core); + self.hash_builder.clone_from(&other.hash_builder); + } +} + +impl Entries for IndexMap { + type Entry = Bucket; + + #[inline] + fn into_entries(self) -> Vec { + self.core.into_entries() + } + + #[inline] + fn as_entries(&self) -> &[Self::Entry] { + self.core.as_entries() + } + + #[inline] + fn as_entries_mut(&mut self) -> &mut [Self::Entry] { + self.core.as_entries_mut() + } + + fn with_entries(&mut self, f: F) + where + F: FnOnce(&mut [Self::Entry]), + { + self.core.with_entries(f); + } +} + +impl fmt::Debug for IndexMap +where + K: fmt::Debug, + V: fmt::Debug, +{ + #[cfg(not(feature = "test_debug"))] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_map().entries(self.iter()).finish() + } + + #[cfg(feature = "test_debug")] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Let the inner `IndexMapCore` print all of its details + f.debug_struct("IndexMap") + .field("core", &self.core) + .finish() + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl IndexMap { + /// Create a new map. (Does not allocate.) + #[inline] + pub fn new() -> Self { + Self::with_capacity(0) + } + + /// Create a new map with capacity for `n` key-value pairs. (Does not + /// allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + #[inline] + pub fn with_capacity(n: usize) -> Self { + Self::with_capacity_and_hasher(n, <_>::default()) + } +} + +impl IndexMap { + /// Create a new map with capacity for `n` key-value pairs. (Does not + /// allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + #[inline] + pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self { + if n == 0 { + Self::with_hasher(hash_builder) + } else { + IndexMap { + core: IndexMapCore::with_capacity(n), + hash_builder, + } + } + } + + /// Create a new map with `hash_builder`. + /// + /// This function is `const`, so it + /// can be called in `static` contexts. + pub const fn with_hasher(hash_builder: S) -> Self { + IndexMap { + core: IndexMapCore::new(), + hash_builder, + } + } + + /// Return the number of elements the map can hold without reallocating. + /// + /// This number is a lower bound; the map might be able to hold more, + /// but is guaranteed to be able to hold at least this many. + /// + /// Computes in **O(1)** time. + pub fn capacity(&self) -> usize { + self.core.capacity() + } + + /// Return a reference to the map's `BuildHasher`. + pub fn hasher(&self) -> &S { + &self.hash_builder + } + + /// Return the number of key-value pairs in the map. + /// + /// Computes in **O(1)** time. + #[inline] + pub fn len(&self) -> usize { + self.core.len() + } + + /// Returns true if the map contains no elements. + /// + /// Computes in **O(1)** time. 
+ #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Return an iterator over the key-value pairs of the map, in their order + pub fn iter(&self) -> Iter<'_, K, V> { + Iter::new(self.as_entries()) + } + + /// Return an iterator over the key-value pairs of the map, in their order + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + IterMut::new(self.as_entries_mut()) + } + + /// Return an iterator over the keys of the map, in their order + pub fn keys(&self) -> Keys<'_, K, V> { + Keys::new(self.as_entries()) + } + + /// Return an owning iterator over the keys of the map, in their order + pub fn into_keys(self) -> IntoKeys { + IntoKeys::new(self.into_entries()) + } + + /// Return an iterator over the values of the map, in their order + pub fn values(&self) -> Values<'_, K, V> { + Values::new(self.as_entries()) + } + + /// Return an iterator over mutable references to the values of the map, + /// in their order + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { + ValuesMut::new(self.as_entries_mut()) + } + + /// Return an owning iterator over the values of the map, in their order + pub fn into_values(self) -> IntoValues { + IntoValues::new(self.into_entries()) + } + + /// Remove all key-value pairs in the map, while preserving its capacity. + /// + /// Computes in **O(n)** time. + pub fn clear(&mut self) { + self.core.clear(); + } + + /// Shortens the map, keeping the first `len` elements and dropping the rest. + /// + /// If `len` is greater than the map's current length, this has no effect. + pub fn truncate(&mut self, len: usize) { + self.core.truncate(len); + } + + /// Clears the `IndexMap` in the given index range, returning those + /// key-value pairs as a drain iterator. + /// + /// The range may be any type that implements [`RangeBounds`], + /// including all of the `std::ops::Range*` types, or even a tuple pair of + /// `Bound` start and end values. To drain the map entirely, use `RangeFull` + /// like `map.drain(..)`. + /// + /// This shifts down all entries following the drained range to fill the + /// gap, and keeps the allocated memory for reuse. + /// + /// ***Panics*** if the starting point is greater than the end point or if + /// the end point is greater than the length of the map. + pub fn drain(&mut self, range: R) -> Drain<'_, K, V> + where + R: RangeBounds, + { + Drain::new(self.core.drain(range)) + } + + /// Splits the collection into two at the given index. + /// + /// Returns a newly allocated map containing the elements in the range + /// `[at, len)`. After the call, the original map will be left containing + /// the elements `[0, at)` with its previous capacity unchanged. + /// + /// ***Panics*** if `at > len`. + pub fn split_off(&mut self, at: usize) -> Self + where + S: Clone, + { + Self { + core: self.core.split_off(at), + hash_builder: self.hash_builder.clone(), + } + } + + /// Reserve capacity for `additional` more key-value pairs. + /// + /// Computes in **O(n)** time. + pub fn reserve(&mut self, additional: usize) { + self.core.reserve(additional); + } + + /// Reserve capacity for `additional` more key-value pairs, without over-allocating. + /// + /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid + /// frequent re-allocations. However, the underlying data structures may still have internal + /// capacity requirements, and the allocator itself may give more space than requested, so this + /// cannot be relied upon to be precisely minimal. + /// + /// Computes in **O(n)** time. 
+ pub fn reserve_exact(&mut self, additional: usize) { + self.core.reserve_exact(additional); + } + + /// Try to reserve capacity for `additional` more key-value pairs. + /// + /// Computes in **O(n)** time. + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.core.try_reserve(additional) + } + + /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating. + /// + /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid + /// frequent re-allocations. However, the underlying data structures may still have internal + /// capacity requirements, and the allocator itself may give more space than requested, so this + /// cannot be relied upon to be precisely minimal. + /// + /// Computes in **O(n)** time. + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.core.try_reserve_exact(additional) + } + + /// Shrink the capacity of the map as much as possible. + /// + /// Computes in **O(n)** time. + pub fn shrink_to_fit(&mut self) { + self.core.shrink_to(0); + } + + /// Shrink the capacity of the map with a lower limit. + /// + /// Computes in **O(n)** time. + pub fn shrink_to(&mut self, min_capacity: usize) { + self.core.shrink_to(min_capacity); + } +} + +impl IndexMap +where + K: Hash + Eq, + S: BuildHasher, +{ + /// Insert a key-value pair in the map. + /// + /// If an equivalent key already exists in the map: the key remains and + /// retains in its place in the order, its corresponding value is updated + /// with `value`, and the older value is returned inside `Some(_)`. + /// + /// If no equivalent key existed in the map: the new key-value pair is + /// inserted, last in order, and `None` is returned. + /// + /// Computes in **O(1)** time (amortized average). + /// + /// See also [`entry`][Self::entry] if you want to insert *or* modify, + /// or [`insert_full`][Self::insert_full] if you need to get the index of + /// the corresponding key-value pair. + pub fn insert(&mut self, key: K, value: V) -> Option { + self.insert_full(key, value).1 + } + + /// Insert a key-value pair in the map, and get their index. + /// + /// If an equivalent key already exists in the map: the key remains and + /// retains in its place in the order, its corresponding value is updated + /// with `value`, and the older value is returned inside `(index, Some(_))`. + /// + /// If no equivalent key existed in the map: the new key-value pair is + /// inserted, last in order, and `(index, None)` is returned. + /// + /// Computes in **O(1)** time (amortized average). + /// + /// See also [`entry`][Self::entry] if you want to insert *or* modify. + pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option) { + let hash = self.hash(&key); + self.core.insert_full(hash, key, value) + } + + /// Insert a key-value pair in the map at its ordered position among sorted keys. + /// + /// This is equivalent to finding the position with + /// [`binary_search_keys`][Self::binary_search_keys], then either updating + /// it or calling [`insert_before`][Self::insert_before] for a new key. + /// + /// If the sorted key is found in the map, its corresponding value is + /// updated with `value`, and the older value is returned inside + /// `(index, Some(_))`. Otherwise, the new key-value pair is inserted at + /// the sorted position, and `(index, None)` is returned. 
+ /// + /// If the existing keys are **not** already sorted, then the insertion + /// index is unspecified (like [`slice::binary_search`]), but the key-value + /// pair is moved to or inserted at that position regardless. + /// + /// Computes in **O(n)** time (average). Instead of repeating calls to + /// `insert_sorted`, it may be faster to call batched [`insert`][Self::insert] + /// or [`extend`][Self::extend] and only call [`sort_keys`][Self::sort_keys] + /// or [`sort_unstable_keys`][Self::sort_unstable_keys] once. + pub fn insert_sorted(&mut self, key: K, value: V) -> (usize, Option) + where + K: Ord, + { + match self.binary_search_keys(&key) { + Ok(i) => (i, Some(mem::replace(&mut self[i], value))), + Err(i) => self.insert_before(i, key, value), + } + } + + /// Insert a key-value pair in the map before the entry at the given index, or at the end. + /// + /// If an equivalent key already exists in the map: the key remains and + /// is moved to the new position in the map, its corresponding value is updated + /// with `value`, and the older value is returned inside `Some(_)`. The returned index + /// will either be the given index or one less, depending on how the entry moved. + /// (See [`shift_insert`](Self::shift_insert) for different behavior here.) + /// + /// If no equivalent key existed in the map: the new key-value pair is + /// inserted exactly at the given index, and `None` is returned. + /// + /// ***Panics*** if `index` is out of bounds. + /// Valid indices are `0..=map.len()` (inclusive). + /// + /// Computes in **O(n)** time (average). + /// + /// See also [`entry`][Self::entry] if you want to insert *or* modify, + /// perhaps only using the index for new entries with [`VacantEntry::shift_insert`]. + /// + /// # Examples + /// + /// ``` + /// use indexmap::IndexMap; + /// let mut map: IndexMap = ('a'..='z').map(|c| (c, ())).collect(); + /// + /// // The new key '*' goes exactly at the given index. + /// assert_eq!(map.get_index_of(&'*'), None); + /// assert_eq!(map.insert_before(10, '*', ()), (10, None)); + /// assert_eq!(map.get_index_of(&'*'), Some(10)); + /// + /// // Moving the key 'a' up will shift others down, so this moves *before* 10 to index 9. + /// assert_eq!(map.insert_before(10, 'a', ()), (9, Some(()))); + /// assert_eq!(map.get_index_of(&'a'), Some(9)); + /// assert_eq!(map.get_index_of(&'*'), Some(10)); + /// + /// // Moving the key 'z' down will shift others up, so this moves to exactly 10. + /// assert_eq!(map.insert_before(10, 'z', ()), (10, Some(()))); + /// assert_eq!(map.get_index_of(&'z'), Some(10)); + /// assert_eq!(map.get_index_of(&'*'), Some(11)); + /// + /// // Moving or inserting before the endpoint is also valid. + /// assert_eq!(map.len(), 27); + /// assert_eq!(map.insert_before(map.len(), '*', ()), (26, Some(()))); + /// assert_eq!(map.get_index_of(&'*'), Some(26)); + /// assert_eq!(map.insert_before(map.len(), '+', ()), (27, None)); + /// assert_eq!(map.get_index_of(&'+'), Some(27)); + /// assert_eq!(map.len(), 28); + /// ``` + pub fn insert_before(&mut self, mut index: usize, key: K, value: V) -> (usize, Option) { + assert!(index <= self.len(), "index out of bounds"); + match self.entry(key) { + Entry::Occupied(mut entry) => { + if index > entry.index() { + // Some entries will shift down when this one moves up, + // so "insert before index" becomes "move to index - 1", + // keeping the entry at the original index unmoved. 
+ index -= 1; + } + let old = mem::replace(entry.get_mut(), value); + entry.move_index(index); + (index, Some(old)) + } + Entry::Vacant(entry) => { + entry.shift_insert(index, value); + (index, None) + } + } + } + + /// Insert a key-value pair in the map at the given index. + /// + /// If an equivalent key already exists in the map: the key remains and + /// is moved to the given index in the map, its corresponding value is updated + /// with `value`, and the older value is returned inside `Some(_)`. + /// Note that existing entries **cannot** be moved to `index == map.len()`! + /// (See [`insert_before`](Self::insert_before) for different behavior here.) + /// + /// If no equivalent key existed in the map: the new key-value pair is + /// inserted at the given index, and `None` is returned. + /// + /// ***Panics*** if `index` is out of bounds. + /// Valid indices are `0..map.len()` (exclusive) when moving an existing entry, or + /// `0..=map.len()` (inclusive) when inserting a new key. + /// + /// Computes in **O(n)** time (average). + /// + /// See also [`entry`][Self::entry] if you want to insert *or* modify, + /// perhaps only using the index for new entries with [`VacantEntry::shift_insert`]. + /// + /// # Examples + /// + /// ``` + /// use indexmap::IndexMap; + /// let mut map: IndexMap = ('a'..='z').map(|c| (c, ())).collect(); + /// + /// // The new key '*' goes exactly at the given index. + /// assert_eq!(map.get_index_of(&'*'), None); + /// assert_eq!(map.shift_insert(10, '*', ()), None); + /// assert_eq!(map.get_index_of(&'*'), Some(10)); + /// + /// // Moving the key 'a' up to 10 will shift others down, including the '*' that was at 10. + /// assert_eq!(map.shift_insert(10, 'a', ()), Some(())); + /// assert_eq!(map.get_index_of(&'a'), Some(10)); + /// assert_eq!(map.get_index_of(&'*'), Some(9)); + /// + /// // Moving the key 'z' down to 9 will shift others up, including the '*' that was at 9. + /// assert_eq!(map.shift_insert(9, 'z', ()), Some(())); + /// assert_eq!(map.get_index_of(&'z'), Some(9)); + /// assert_eq!(map.get_index_of(&'*'), Some(10)); + /// + /// // Existing keys can move to len-1 at most, but new keys can insert at the endpoint. + /// assert_eq!(map.len(), 27); + /// assert_eq!(map.shift_insert(map.len() - 1, '*', ()), Some(())); + /// assert_eq!(map.get_index_of(&'*'), Some(26)); + /// assert_eq!(map.shift_insert(map.len(), '+', ()), None); + /// assert_eq!(map.get_index_of(&'+'), Some(27)); + /// assert_eq!(map.len(), 28); + /// ``` + /// + /// ```should_panic + /// use indexmap::IndexMap; + /// let mut map: IndexMap = ('a'..='z').map(|c| (c, ())).collect(); + /// + /// // This is an invalid index for moving an existing key! + /// map.shift_insert(map.len(), 'a', ()); + /// ``` + pub fn shift_insert(&mut self, index: usize, key: K, value: V) -> Option { + let len = self.len(); + match self.entry(key) { + Entry::Occupied(mut entry) => { + assert!(index < len, "index out of bounds"); + let old = mem::replace(entry.get_mut(), value); + entry.move_index(index); + Some(old) + } + Entry::Vacant(entry) => { + assert!(index <= len, "index out of bounds"); + entry.shift_insert(index, value); + None + } + } + } + + /// Get the given key’s corresponding entry in the map for insertion and/or + /// in-place manipulation. + /// + /// Computes in **O(1)** time (amortized average). 
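The `entry` API documented next is the same one the `insert_before`/`shift_insert` code above drives internally; a minimal external sketch of matching on it (illustrative only):

```rust
use indexmap::map::Entry;
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<&str, u32> = IndexMap::new();
    map.insert("hits", 1);

    // One hash lookup covers both the update and the insert case.
    match map.entry("hits") {
        Entry::Occupied(mut e) => *e.get_mut() += 1,
        Entry::Vacant(e) => {
            e.insert(0);
        }
    }
    assert_eq!(map["hits"], 2);
}
```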
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { + let hash = self.hash(&key); + self.core.entry(hash, key) + } + + /// Creates a splicing iterator that replaces the specified range in the map + /// with the given `replace_with` key-value iterator and yields the removed + /// items. `replace_with` does not need to be the same length as `range`. + /// + /// The `range` is removed even if the iterator is not consumed until the + /// end. It is unspecified how many elements are removed from the map if the + /// `Splice` value is leaked. + /// + /// The input iterator `replace_with` is only consumed when the `Splice` + /// value is dropped. If a key from the iterator matches an existing entry + /// in the map (outside of `range`), then the value will be updated in that + /// position. Otherwise, the new key-value pair will be inserted in the + /// replaced `range`. + /// + /// ***Panics*** if the starting point is greater than the end point or if + /// the end point is greater than the length of the map. + /// + /// # Examples + /// + /// ``` + /// use indexmap::IndexMap; + /// + /// let mut map = IndexMap::from([(0, '_'), (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]); + /// let new = [(5, 'E'), (4, 'D'), (3, 'C'), (2, 'B'), (1, 'A')]; + /// let removed: Vec<_> = map.splice(2..4, new).collect(); + /// + /// // 1 and 4 got new values, while 5, 3, and 2 were newly inserted. + /// assert!(map.into_iter().eq([(0, '_'), (1, 'A'), (5, 'E'), (3, 'C'), (2, 'B'), (4, 'D')])); + /// assert_eq!(removed, &[(2, 'b'), (3, 'c')]); + /// ``` + pub fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, K, V, S> + where + R: RangeBounds, + I: IntoIterator, + { + Splice::new(self, range, replace_with.into_iter()) + } + + /// Moves all key-value pairs from `other` into `self`, leaving `other` empty. + /// + /// This is equivalent to calling [`insert`][Self::insert] for each + /// key-value pair from `other` in order, which means that for keys that + /// already exist in `self`, their value is updated in the current position. + /// + /// # Examples + /// + /// ``` + /// use indexmap::IndexMap; + /// + /// // Note: Key (3) is present in both maps. + /// let mut a = IndexMap::from([(3, "c"), (2, "b"), (1, "a")]); + /// let mut b = IndexMap::from([(3, "d"), (4, "e"), (5, "f")]); + /// let old_capacity = b.capacity(); + /// + /// a.append(&mut b); + /// + /// assert_eq!(a.len(), 5); + /// assert_eq!(b.len(), 0); + /// assert_eq!(b.capacity(), old_capacity); + /// + /// assert!(a.keys().eq(&[3, 2, 1, 4, 5])); + /// assert_eq!(a[&3], "d"); // "c" was overwritten. + /// ``` + pub fn append(&mut self, other: &mut IndexMap) { + self.extend(other.drain(..)); + } +} + +impl IndexMap +where + S: BuildHasher, +{ + pub(crate) fn hash(&self, key: &Q) -> HashValue { + let mut h = self.hash_builder.build_hasher(); + key.hash(&mut h); + HashValue(h.finish() as usize) + } + + /// Return `true` if an equivalent to `key` exists in the map. + /// + /// Computes in **O(1)** time (average). + pub fn contains_key(&self, key: &Q) -> bool + where + Q: ?Sized + Hash + Equivalent, + { + self.get_index_of(key).is_some() + } + + /// Return a reference to the value stored for `key`, if it is present, + /// else `None`. + /// + /// Computes in **O(1)** time (average). 
+ pub fn get(&self, key: &Q) -> Option<&V> + where + Q: ?Sized + Hash + Equivalent, + { + if let Some(i) = self.get_index_of(key) { + let entry = &self.as_entries()[i]; + Some(&entry.value) + } else { + None + } + } + + /// Return references to the key-value pair stored for `key`, + /// if it is present, else `None`. + /// + /// Computes in **O(1)** time (average). + pub fn get_key_value(&self, key: &Q) -> Option<(&K, &V)> + where + Q: ?Sized + Hash + Equivalent, + { + if let Some(i) = self.get_index_of(key) { + let entry = &self.as_entries()[i]; + Some((&entry.key, &entry.value)) + } else { + None + } + } + + /// Return item index, key and value + pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> + where + Q: ?Sized + Hash + Equivalent, + { + if let Some(i) = self.get_index_of(key) { + let entry = &self.as_entries()[i]; + Some((i, &entry.key, &entry.value)) + } else { + None + } + } + + /// Return item index, if it exists in the map + /// + /// Computes in **O(1)** time (average). + pub fn get_index_of(&self, key: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + match self.as_entries() { + [] => None, + [x] => key.equivalent(&x.key).then_some(0), + _ => { + let hash = self.hash(key); + self.core.get_index_of(hash, key) + } + } + } + + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where + Q: ?Sized + Hash + Equivalent, + { + if let Some(i) = self.get_index_of(key) { + let entry = &mut self.as_entries_mut()[i]; + Some(&mut entry.value) + } else { + None + } + } + + pub fn get_full_mut(&mut self, key: &Q) -> Option<(usize, &K, &mut V)> + where + Q: ?Sized + Hash + Equivalent, + { + if let Some(i) = self.get_index_of(key) { + let entry = &mut self.as_entries_mut()[i]; + Some((i, &entry.key, &mut entry.value)) + } else { + None + } + } + + /// Remove the key-value pair equivalent to `key` and return + /// its value. + /// + /// **NOTE:** This is equivalent to [`.swap_remove(key)`][Self::swap_remove], replacing this + /// entry's position with the last element, and it is deprecated in favor of calling that + /// explicitly. If you need to preserve the relative order of the keys in the map, use + /// [`.shift_remove(key)`][Self::shift_remove] instead. + #[deprecated(note = "`remove` disrupts the map order -- \ + use `swap_remove` or `shift_remove` for explicit behavior.")] + pub fn remove(&mut self, key: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + self.swap_remove(key) + } + + /// Remove and return the key-value pair equivalent to `key`. + /// + /// **NOTE:** This is equivalent to [`.swap_remove_entry(key)`][Self::swap_remove_entry], + /// replacing this entry's position with the last element, and it is deprecated in favor of + /// calling that explicitly. If you need to preserve the relative order of the keys in the map, + /// use [`.shift_remove_entry(key)`][Self::shift_remove_entry] instead. + #[deprecated(note = "`remove_entry` disrupts the map order -- \ + use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")] + pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + Q: ?Sized + Hash + Equivalent, + { + self.swap_remove_entry(key) + } + + /// Remove the key-value pair equivalent to `key` and return + /// its value. + /// + /// Like [`Vec::swap_remove`], the pair is removed by swapping it with the + /// last element of the map and popping it off. **This perturbs + /// the position of what used to be the last element!** + /// + /// Return `None` if `key` is not in map. 
+ /// + /// Computes in **O(1)** time (average). + pub fn swap_remove(&mut self, key: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + self.swap_remove_full(key).map(third) + } + + /// Remove and return the key-value pair equivalent to `key`. + /// + /// Like [`Vec::swap_remove`], the pair is removed by swapping it with the + /// last element of the map and popping it off. **This perturbs + /// the position of what used to be the last element!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + Q: ?Sized + Hash + Equivalent, + { + match self.swap_remove_full(key) { + Some((_, key, value)) => Some((key, value)), + None => None, + } + } + + /// Remove the key-value pair equivalent to `key` and return it and + /// the index it had. + /// + /// Like [`Vec::swap_remove`], the pair is removed by swapping it with the + /// last element of the map and popping it off. **This perturbs + /// the position of what used to be the last element!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> + where + Q: ?Sized + Hash + Equivalent, + { + match self.as_entries() { + [x] if key.equivalent(&x.key) => { + let (k, v) = self.core.pop()?; + Some((0, k, v)) + } + [_] | [] => None, + _ => { + let hash = self.hash(key); + self.core.swap_remove_full(hash, key) + } + } + } + + /// Remove the key-value pair equivalent to `key` and return + /// its value. + /// + /// Like [`Vec::remove`], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove(&mut self, key: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + self.shift_remove_full(key).map(third) + } + + /// Remove and return the key-value pair equivalent to `key`. + /// + /// Like [`Vec::remove`], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_entry(&mut self, key: &Q) -> Option<(K, V)> + where + Q: ?Sized + Hash + Equivalent, + { + match self.shift_remove_full(key) { + Some((_, key, value)) => Some((key, value)), + None => None, + } + } + + /// Remove the key-value pair equivalent to `key` and return it and + /// the index it had. + /// + /// Like [`Vec::remove`], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `None` if `key` is not in map. + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> + where + Q: ?Sized + Hash + Equivalent, + { + match self.as_entries() { + [x] if key.equivalent(&x.key) => { + let (k, v) = self.core.pop()?; + Some((0, k, v)) + } + [_] | [] => None, + _ => { + let hash = self.hash(key); + self.core.shift_remove_full(hash, key) + } + } + } +} + +impl IndexMap { + /// Remove the last key-value pair + /// + /// This preserves the order of the remaining elements. + /// + /// Computes in **O(1)** time (average). 
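A compact sketch contrasting the two removal flavors documented above (`swap_remove` is O(1) but moves the last entry into the hole; `shift_remove` is O(n) but keeps relative order), illustrative only:

```rust
use indexmap::IndexMap;

fn main() {
    let a: IndexMap<&str, i32> = [("w", 0), ("x", 1), ("y", 2), ("z", 3)]
        .into_iter()
        .collect();
    let mut swapped = a.clone();
    let mut shifted = a.clone();

    // swap_remove: the last entry "z" is swapped into x's old slot.
    swapped.swap_remove("x");
    assert!(swapped.keys().copied().eq(["w", "z", "y"]));

    // shift_remove: everything after "x" shifts down, order preserved.
    shifted.shift_remove("x");
    assert!(shifted.keys().copied().eq(["w", "y", "z"]));
}
```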
+ #[doc(alias = "pop_last")] // like `BTreeMap` + pub fn pop(&mut self) -> Option<(K, V)> { + self.core.pop() + } + + /// Scan through each key-value pair in the map and keep those where the + /// closure `keep` returns `true`. + /// + /// The elements are visited in order, and remaining elements keep their + /// order. + /// + /// Computes in **O(n)** time (average). + pub fn retain(&mut self, mut keep: F) + where + F: FnMut(&K, &mut V) -> bool, + { + self.core.retain_in_order(move |k, v| keep(k, v)); + } + + /// Sort the map’s key-value pairs by the default ordering of the keys. + /// + /// This is a stable sort -- but equivalent keys should not normally coexist in + /// a map at all, so [`sort_unstable_keys`][Self::sort_unstable_keys] is preferred + /// because it is generally faster and doesn't allocate auxiliary memory. + /// + /// See [`sort_by`](Self::sort_by) for details. + pub fn sort_keys(&mut self) + where + K: Ord, + { + self.with_entries(move |entries| { + entries.sort_by(move |a, b| K::cmp(&a.key, &b.key)); + }); + } + + /// Sort the map’s key-value pairs in place using the comparison + /// function `cmp`. + /// + /// The comparison function receives two key and value pairs to compare (you + /// can sort by keys or values or their combination as needed). + /// + /// Computes in **O(n log n + c)** time and **O(n)** space where *n* is + /// the length of the map and *c* the capacity. The sort is stable. + pub fn sort_by(&mut self, mut cmp: F) + where + F: FnMut(&K, &V, &K, &V) -> Ordering, + { + self.with_entries(move |entries| { + entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + }); + } + + /// Sort the key-value pairs of the map and return a by-value iterator of + /// the key-value pairs with the result. + /// + /// The sort is stable. + pub fn sorted_by(self, mut cmp: F) -> IntoIter + where + F: FnMut(&K, &V, &K, &V) -> Ordering, + { + let mut entries = self.into_entries(); + entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + IntoIter::new(entries) + } + + /// Sort the map's key-value pairs by the default ordering of the keys, but + /// may not preserve the order of equal elements. + /// + /// See [`sort_unstable_by`](Self::sort_unstable_by) for details. + pub fn sort_unstable_keys(&mut self) + where + K: Ord, + { + self.with_entries(move |entries| { + entries.sort_unstable_by(move |a, b| K::cmp(&a.key, &b.key)); + }); + } + + /// Sort the map's key-value pairs in place using the comparison function `cmp`, but + /// may not preserve the order of equal elements. + /// + /// The comparison function receives two key and value pairs to compare (you + /// can sort by keys or values or their combination as needed). + /// + /// Computes in **O(n log n + c)** time where *n* is + /// the length of the map and *c* is the capacity. The sort is unstable. + pub fn sort_unstable_by(&mut self, mut cmp: F) + where + F: FnMut(&K, &V, &K, &V) -> Ordering, + { + self.with_entries(move |entries| { + entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + }); + } + + /// Sort the key-value pairs of the map and return a by-value iterator of + /// the key-value pairs with the result. + /// + /// The sort is unstable. 
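The sorting and binary-search family above composes naturally; a minimal sketch, using the `From<[(K, V); N]>` construction shown in the `splice` example earlier in this file:

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::from([(3, "c"), (1, "a"), (2, "b")]);

    // sort_keys reorders the entries in place by Ord on the keys.
    map.sort_keys();
    assert!(map.keys().copied().eq(1..=3));

    // On a sorted map, binary_search_keys finds keys or insertion points.
    assert_eq!(map.binary_search_keys(&2), Ok(1));
    assert_eq!(map.binary_search_keys(&4), Err(3));
}
```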
+ #[inline] + pub fn sorted_unstable_by(self, mut cmp: F) -> IntoIter + where + F: FnMut(&K, &V, &K, &V) -> Ordering, + { + let mut entries = self.into_entries(); + entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + IntoIter::new(entries) + } + + /// Sort the map’s key-value pairs in place using a sort-key extraction function. + /// + /// During sorting, the function is called at most once per entry, by using temporary storage + /// to remember the results of its evaluation. The order of calls to the function is + /// unspecified and may change between versions of `indexmap` or the standard library. + /// + /// Computes in **O(m n + n log n + c)** time () and **O(n)** space, where the function is + /// **O(m)**, *n* is the length of the map, and *c* the capacity. The sort is stable. + pub fn sort_by_cached_key(&mut self, mut sort_key: F) + where + T: Ord, + F: FnMut(&K, &V) -> T, + { + self.with_entries(move |entries| { + entries.sort_by_cached_key(move |a| sort_key(&a.key, &a.value)); + }); + } + + /// Search over a sorted map for a key. + /// + /// Returns the position where that key is present, or the position where it can be inserted to + /// maintain the sort. See [`slice::binary_search`] for more details. + /// + /// Computes in **O(log(n))** time, which is notably less scalable than looking the key up + /// using [`get_index_of`][IndexMap::get_index_of], but this can also position missing keys. + pub fn binary_search_keys(&self, x: &K) -> Result + where + K: Ord, + { + self.as_slice().binary_search_keys(x) + } + + /// Search over a sorted map with a comparator function. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search_by`] for more details. + /// + /// Computes in **O(log(n))** time. + #[inline] + pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result + where + F: FnMut(&'a K, &'a V) -> Ordering, + { + self.as_slice().binary_search_by(f) + } + + /// Search over a sorted map with an extraction function. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. + /// + /// Computes in **O(log(n))** time. + #[inline] + pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result + where + F: FnMut(&'a K, &'a V) -> B, + B: Ord, + { + self.as_slice().binary_search_by_key(b, f) + } + + /// Returns the index of the partition point of a sorted map according to the given predicate + /// (the index of the first element of the second partition). + /// + /// See [`slice::partition_point`] for more details. + /// + /// Computes in **O(log(n))** time. + #[must_use] + pub fn partition_point
(&self, pred: P) -> usize + where + P: FnMut(&K, &V) -> bool, + { + self.as_slice().partition_point(pred) + } + + /// Reverses the order of the map’s key-value pairs in place. + /// + /// Computes in **O(n)** time and **O(1)** space. + pub fn reverse(&mut self) { + self.core.reverse() + } + + /// Returns a slice of all the key-value pairs in the map. + /// + /// Computes in **O(1)** time. + pub fn as_slice(&self) -> &Slice { + Slice::from_slice(self.as_entries()) + } + + /// Returns a mutable slice of all the key-value pairs in the map. + /// + /// Computes in **O(1)** time. + pub fn as_mut_slice(&mut self) -> &mut Slice { + Slice::from_mut_slice(self.as_entries_mut()) + } + + /// Converts into a boxed slice of all the key-value pairs in the map. + /// + /// Note that this will drop the inner hash table and any excess capacity. + pub fn into_boxed_slice(self) -> Box> { + Slice::from_boxed(self.into_entries().into_boxed_slice()) + } + + /// Get a key-value pair by index + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { + self.as_entries().get(index).map(Bucket::refs) + } + + /// Get a key-value pair by index + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> { + self.as_entries_mut().get_mut(index).map(Bucket::ref_mut) + } + + /// Get an entry in the map by index for in-place manipulation. + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + pub fn get_index_entry(&mut self, index: usize) -> Option> { + if index >= self.len() { + return None; + } + Some(IndexedEntry::new(&mut self.core, index)) + } + + /// Returns a slice of key-value pairs in the given range of indices. + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + pub fn get_range>(&self, range: R) -> Option<&Slice> { + let entries = self.as_entries(); + let range = try_simplify_range(range, entries.len())?; + entries.get(range).map(Slice::from_slice) + } + + /// Returns a mutable slice of key-value pairs in the given range of indices. + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Slice> { + let entries = self.as_entries_mut(); + let range = try_simplify_range(range, entries.len())?; + entries.get_mut(range).map(Slice::from_mut_slice) + } + + /// Get the first key-value pair + /// + /// Computes in **O(1)** time. + #[doc(alias = "first_key_value")] // like `BTreeMap` + pub fn first(&self) -> Option<(&K, &V)> { + self.as_entries().first().map(Bucket::refs) + } + + /// Get the first key-value pair, with mutable access to the value + /// + /// Computes in **O(1)** time. + pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { + self.as_entries_mut().first_mut().map(Bucket::ref_mut) + } + + /// Get the first entry in the map for in-place manipulation. + /// + /// Computes in **O(1)** time. + pub fn first_entry(&mut self) -> Option> { + self.get_index_entry(0) + } + + /// Get the last key-value pair + /// + /// Computes in **O(1)** time. + #[doc(alias = "last_key_value")] // like `BTreeMap` + pub fn last(&self) -> Option<(&K, &V)> { + self.as_entries().last().map(Bucket::refs) + } + + /// Get the last key-value pair, with mutable access to the value + /// + /// Computes in **O(1)** time. 
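
The sorted-map search helpers and the positional accessors above can be combined as in the following hedged sketch (not taken from the crate's own docs); `indexmap` 2.x is assumed and the numeric keys are arbitrary.

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::from([(30, "c"), (10, "a"), (20, "b"), (40, "d")]);
    map.sort_keys(); // binary search assumes the keys are already sorted

    // O(log n) lookup by key order, also reporting where a missing key would go.
    assert_eq!(map.binary_search_keys(&20), Ok(1));
    assert_eq!(map.binary_search_keys(&25), Err(2));

    // First index at which the predicate flips from true to false.
    assert_eq!(map.partition_point(|&k, _v| k < 30), 2);

    // Positional access is O(1).
    assert_eq!(map.get_index(0), Some((&10, &"a")));
    let middle = map.get_range(1..3).unwrap();
    assert_eq!(middle.len(), 2);
}
```
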
+ pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { + self.as_entries_mut().last_mut().map(Bucket::ref_mut) + } + + /// Get the last entry in the map for in-place manipulation. + /// + /// Computes in **O(1)** time. + pub fn last_entry(&mut self) -> Option> { + self.get_index_entry(self.len().checked_sub(1)?) + } + + /// Remove the key-value pair by index + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Like [`Vec::swap_remove`], the pair is removed by swapping it with the + /// last element of the map and popping it off. **This perturbs + /// the position of what used to be the last element!** + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { + self.core.swap_remove_index(index) + } + + /// Remove the key-value pair by index + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Like [`Vec::remove`], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { + self.core.shift_remove_index(index) + } + + /// Moves the position of a key-value pair from one index to another + /// by shifting all other pairs in-between. + /// + /// * If `from < to`, the other pairs will shift down while the targeted pair moves up. + /// * If `from > to`, the other pairs will shift up while the targeted pair moves down. + /// + /// ***Panics*** if `from` or `to` are out of bounds. + /// + /// Computes in **O(n)** time (average). + pub fn move_index(&mut self, from: usize, to: usize) { + self.core.move_index(from, to) + } + + /// Swaps the position of two key-value pairs in the map. + /// + /// ***Panics*** if `a` or `b` are out of bounds. + /// + /// Computes in **O(1)** time (average). + pub fn swap_indices(&mut self, a: usize, b: usize) { + self.core.swap_indices(a, b) + } +} + +/// Access [`IndexMap`] values corresponding to a key. +/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { +/// map.insert(word.to_lowercase(), word.to_uppercase()); +/// } +/// assert_eq!(map["lorem"], "LOREM"); +/// assert_eq!(map["ipsum"], "IPSUM"); +/// ``` +/// +/// ```should_panic +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// map.insert("foo", 1); +/// println!("{:?}", map["bar"]); // panics! +/// ``` +impl Index<&Q> for IndexMap +where + Q: Hash + Equivalent, + S: BuildHasher, +{ + type Output = V; + + /// Returns a reference to the value corresponding to the supplied `key`. + /// + /// ***Panics*** if `key` is not present in the map. + fn index(&self, key: &Q) -> &V { + self.get(key).expect("IndexMap: key not found") + } +} + +/// Access [`IndexMap`] values corresponding to a key. +/// +/// Mutable indexing allows changing / updating values of key-value +/// pairs that are already present. +/// +/// You can **not** insert new pairs with index syntax, use `.insert()`. 
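
To illustrate the positional removal and reordering methods above, together with key- and index-based `Index`/`IndexMut`, here is a short sketch outside the patch; it assumes `indexmap` 2.x, and the panicking behaviour on missing keys or out-of-range indices is exactly what the doc examples around it demonstrate.

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::from([("a", 1), ("b", 2), ("c", 3)]);

    // Key-based and position-based indexing both panic when missing or out of bounds.
    assert_eq!(map["b"], 2);
    assert_eq!(map[0], 1);
    map["c"] += 10; // IndexMut<&Q>: update an existing value in place

    // Reorder entries without touching keys or values.
    map.move_index(2, 0); // "c" moves to the front, the others shift right
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["c", "a", "b"]);
    map.swap_indices(0, 2);
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["b", "a", "c"]);

    // Positional removal mirrors the key-based swap/shift variants.
    assert_eq!(map.swap_remove_index(0), Some(("b", 2)));
}
```
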
+/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { +/// map.insert(word.to_lowercase(), word.to_string()); +/// } +/// let lorem = &mut map["lorem"]; +/// assert_eq!(lorem, "Lorem"); +/// lorem.retain(char::is_lowercase); +/// assert_eq!(map["lorem"], "orem"); +/// ``` +/// +/// ```should_panic +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// map.insert("foo", 1); +/// map["bar"] = 1; // panics! +/// ``` +impl IndexMut<&Q> for IndexMap +where + Q: Hash + Equivalent, + S: BuildHasher, +{ + /// Returns a mutable reference to the value corresponding to the supplied `key`. + /// + /// ***Panics*** if `key` is not present in the map. + fn index_mut(&mut self, key: &Q) -> &mut V { + self.get_mut(key).expect("IndexMap: key not found") + } +} + +/// Access [`IndexMap`] values at indexed positions. +/// +/// See [`Index for Keys`][keys] to access a map's keys instead. +/// +/// [keys]: Keys#impl-Index-for-Keys<'a,+K,+V> +/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { +/// map.insert(word.to_lowercase(), word.to_uppercase()); +/// } +/// assert_eq!(map[0], "LOREM"); +/// assert_eq!(map[1], "IPSUM"); +/// map.reverse(); +/// assert_eq!(map[0], "AMET"); +/// assert_eq!(map[1], "SIT"); +/// map.sort_keys(); +/// assert_eq!(map[0], "AMET"); +/// assert_eq!(map[1], "DOLOR"); +/// ``` +/// +/// ```should_panic +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// map.insert("foo", 1); +/// println!("{:?}", map[10]); // panics! +/// ``` +impl Index for IndexMap { + type Output = V; + + /// Returns a reference to the value at the supplied `index`. + /// + /// ***Panics*** if `index` is out of bounds. + fn index(&self, index: usize) -> &V { + self.get_index(index) + .expect("IndexMap: index out of bounds") + .1 + } +} + +/// Access [`IndexMap`] values at indexed positions. +/// +/// Mutable indexing allows changing / updating indexed values +/// that are already present. +/// +/// You can **not** insert new values with index syntax -- use [`.insert()`][IndexMap::insert]. +/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { +/// map.insert(word.to_lowercase(), word.to_string()); +/// } +/// let lorem = &mut map[0]; +/// assert_eq!(lorem, "Lorem"); +/// lorem.retain(char::is_lowercase); +/// assert_eq!(map["lorem"], "orem"); +/// ``` +/// +/// ```should_panic +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// map.insert("foo", 1); +/// map[10] = 1; // panics! +/// ``` +impl IndexMut for IndexMap { + /// Returns a mutable reference to the value at the supplied `index`. + /// + /// ***Panics*** if `index` is out of bounds. + fn index_mut(&mut self, index: usize) -> &mut V { + self.get_index_mut(index) + .expect("IndexMap: index out of bounds") + .1 + } +} + +impl FromIterator<(K, V)> for IndexMap +where + K: Hash + Eq, + S: BuildHasher + Default, +{ + /// Create an `IndexMap` from the sequence of key-value pairs in the + /// iterable. + /// + /// `from_iter` uses the same logic as `extend`. See + /// [`extend`][IndexMap::extend] for more details. 
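
A small sketch (not part of the diff; `indexmap` 2.x assumed) of the `FromIterator`/`Extend` behaviour described here and just below: a duplicated key keeps its first position but takes the last value, and new keys are appended in iteration order.

```rust
use indexmap::IndexMap;

fn main() {
    // `collect` preserves first-insertion order; the repeated "a" keeps its
    // original position but ends up with the last value, exactly like `extend`.
    let map: IndexMap<&str, i32> = [("a", 1), ("b", 2), ("a", 3)].into_iter().collect();
    assert_eq!(map.len(), 2);
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "b"]);
    assert_eq!(map["a"], 3);

    // Extending an existing map follows the same rules.
    let mut map = map;
    map.extend([("c", 4), ("b", 20)]);
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "b", "c"]);
    assert_eq!(map["b"], 20);
}
```
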
+ fn from_iter>(iterable: I) -> Self { + let iter = iterable.into_iter(); + let (low, _) = iter.size_hint(); + let mut map = Self::with_capacity_and_hasher(low, <_>::default()); + map.extend(iter); + map + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl From<[(K, V); N]> for IndexMap +where + K: Hash + Eq, +{ + /// # Examples + /// + /// ``` + /// use indexmap::IndexMap; + /// + /// let map1 = IndexMap::from([(1, 2), (3, 4)]); + /// let map2: IndexMap<_, _> = [(1, 2), (3, 4)].into(); + /// assert_eq!(map1, map2); + /// ``` + fn from(arr: [(K, V); N]) -> Self { + Self::from_iter(arr) + } +} + +impl Extend<(K, V)> for IndexMap +where + K: Hash + Eq, + S: BuildHasher, +{ + /// Extend the map with all key-value pairs in the iterable. + /// + /// This is equivalent to calling [`insert`][IndexMap::insert] for each of + /// them in order, which means that for keys that already existed + /// in the map, their value is updated but it keeps the existing order. + /// + /// New keys are inserted in the order they appear in the sequence. If + /// equivalents of a key occur more than once, the last corresponding value + /// prevails. + fn extend>(&mut self, iterable: I) { + // (Note: this is a copy of `std`/`hashbrown`'s reservation logic.) + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. + // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case. + let iter = iterable.into_iter(); + let reserve = if self.is_empty() { + iter.size_hint().0 + } else { + (iter.size_hint().0 + 1) / 2 + }; + self.reserve(reserve); + iter.for_each(move |(k, v)| { + self.insert(k, v); + }); + } +} + +impl<'a, K, V, S> Extend<(&'a K, &'a V)> for IndexMap +where + K: Hash + Eq + Copy, + V: Copy, + S: BuildHasher, +{ + /// Extend the map with all key-value pairs in the iterable. + /// + /// See the first extend method for more details. + fn extend>(&mut self, iterable: I) { + self.extend(iterable.into_iter().map(|(&key, &value)| (key, value))); + } +} + +impl Default for IndexMap +where + S: Default, +{ + /// Return an empty [`IndexMap`] + fn default() -> Self { + Self::with_capacity_and_hasher(0, S::default()) + } +} + +impl PartialEq> for IndexMap +where + K: Hash + Eq, + V1: PartialEq, + S1: BuildHasher, + S2: BuildHasher, +{ + fn eq(&self, other: &IndexMap) -> bool { + if self.len() != other.len() { + return false; + } + + self.iter() + .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + } +} + +impl Eq for IndexMap +where + K: Eq + Hash, + V: Eq, + S: BuildHasher, +{ +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core.rs new file mode 100644 index 000000000000..f42cccbffaec --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core.rs @@ -0,0 +1,718 @@ +//! This is the core implementation that doesn't depend on the hasher at all. +//! +//! The methods of `IndexMapCore` don't use any Hash properties of K. +//! +//! It's cleaner to separate them out, then the compiler checks that we are not +//! using Hash at all in these methods. +//! +//! However, we should probably not let this show in the public API or docs. 
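
The module comment above describes the split between a dense entry vector and a hash table that stores only positions. The following self-contained toy is an editorial illustration of that layout, not the actual implementation: `ToyIndexMap` is a made-up name, and it uses a plain `std::collections::HashMap` keyed by the key itself, whereas the real `IndexMapCore` keeps bare `usize` positions in a `hashbrown::HashTable` keyed by the cached hash (which is why this module needs no `Hash`/`Eq` bounds of its own).

```rust
use std::collections::HashMap;

/// Toy model of the layout sketched above: values live in a dense,
/// insertion-ordered `entries` vector, and the hash side only maps a key
/// back to its position.
struct ToyIndexMap<K, V> {
    entries: Vec<(K, V)>,       // order-preserving storage
    indices: HashMap<K, usize>, // key -> index into `entries`
}

impl<K: std::hash::Hash + Eq + Clone, V> ToyIndexMap<K, V> {
    fn new() -> Self {
        Self { entries: Vec::new(), indices: HashMap::new() }
    }

    fn insert(&mut self, key: K, value: V) -> Option<V> {
        if let Some(&i) = self.indices.get(&key) {
            // Existing key: overwrite the value in place, position unchanged.
            return Some(std::mem::replace(&mut self.entries[i].1, value));
        }
        // New key: append to `entries` and record its index.
        self.indices.insert(key.clone(), self.entries.len());
        self.entries.push((key, value));
        None
    }

    fn get(&self, key: &K) -> Option<&V> {
        self.indices.get(key).map(|&i| &self.entries[i].1)
    }
}

fn main() {
    let mut m = ToyIndexMap::new();
    m.insert("b", 2);
    m.insert("a", 1);
    m.insert("b", 20); // updates in place, keeps position 0
    assert_eq!(m.get(&"b"), Some(&20));
    assert_eq!(m.entries.iter().map(|(k, _)| *k).collect::<Vec<_>>(), ["b", "a"]);
}
```
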
+ +mod entry; + +pub mod raw_entry_v1; + +use hashbrown::hash_table; + +use crate::vec::{self, Vec}; +use crate::TryReserveError; +use core::mem; +use core::ops::RangeBounds; + +use crate::util::simplify_range; +use crate::{Bucket, Equivalent, HashValue}; + +type Indices = hash_table::HashTable; +type Entries = Vec>; + +pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry}; + +/// Core of the map that does not depend on S +#[derive(Debug)] +pub(crate) struct IndexMapCore { + /// indices mapping from the entry hash to its index. + indices: Indices, + /// entries is a dense vec maintaining entry order. + entries: Entries, +} + +/// Mutable references to the parts of an `IndexMapCore`. +/// +/// When using `HashTable::find_entry`, that takes hold of `&mut indices`, so we have to borrow our +/// `&mut entries` separately, and there's no way to go back to a `&mut IndexMapCore`. So this type +/// is used to implement methods on the split references, and `IndexMapCore` can also call those to +/// avoid duplication. +struct RefMut<'a, K, V> { + indices: &'a mut Indices, + entries: &'a mut Entries, +} + +#[inline(always)] +fn get_hash(entries: &[Bucket]) -> impl Fn(&usize) -> u64 + '_ { + move |&i| entries[i].hash.get() +} + +#[inline] +fn equivalent<'a, K, V, Q: ?Sized + Equivalent>( + key: &'a Q, + entries: &'a [Bucket], +) -> impl Fn(&usize) -> bool + 'a { + move |&i| Q::equivalent(key, &entries[i].key) +} + +#[inline] +fn erase_index(table: &mut Indices, hash: HashValue, index: usize) { + if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) { + entry.remove(); + } else if cfg!(debug_assertions) { + panic!("index not found"); + } +} + +#[inline] +fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) { + let index = table + .find_mut(hash.get(), move |&i| i == old) + .expect("index not found"); + *index = new; +} + +/// Inserts many entries into the indices table without reallocating, +/// and without regard for duplication. +/// +/// ***Panics*** if there is not sufficient capacity already. +fn insert_bulk_no_grow(indices: &mut Indices, entries: &[Bucket]) { + assert!(indices.capacity() - indices.len() >= entries.len()); + for entry in entries { + indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!()); + } +} + +impl Clone for IndexMapCore +where + K: Clone, + V: Clone, +{ + fn clone(&self) -> Self { + let mut new = Self::new(); + new.clone_from(self); + new + } + + fn clone_from(&mut self, other: &Self) { + self.indices.clone_from(&other.indices); + if self.entries.capacity() < other.entries.len() { + // If we must resize, match the indices capacity. + let additional = other.entries.len() - self.entries.len(); + self.borrow_mut().reserve_entries(additional); + } + self.entries.clone_from(&other.entries); + } +} + +impl crate::Entries for IndexMapCore { + type Entry = Bucket; + + #[inline] + fn into_entries(self) -> Vec { + self.entries + } + + #[inline] + fn as_entries(&self) -> &[Self::Entry] { + &self.entries + } + + #[inline] + fn as_entries_mut(&mut self) -> &mut [Self::Entry] { + &mut self.entries + } + + fn with_entries(&mut self, f: F) + where + F: FnOnce(&mut [Self::Entry]), + { + f(&mut self.entries); + self.rebuild_hash_table(); + } +} + +impl IndexMapCore { + /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`. 
+ const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::>(); + + #[inline] + pub(crate) const fn new() -> Self { + IndexMapCore { + indices: Indices::new(), + entries: Vec::new(), + } + } + + #[inline] + fn borrow_mut(&mut self) -> RefMut<'_, K, V> { + RefMut::new(&mut self.indices, &mut self.entries) + } + + #[inline] + pub(crate) fn with_capacity(n: usize) -> Self { + IndexMapCore { + indices: Indices::with_capacity(n), + entries: Vec::with_capacity(n), + } + } + + #[inline] + pub(crate) fn len(&self) -> usize { + self.indices.len() + } + + #[inline] + pub(crate) fn capacity(&self) -> usize { + Ord::min(self.indices.capacity(), self.entries.capacity()) + } + + pub(crate) fn clear(&mut self) { + self.indices.clear(); + self.entries.clear(); + } + + pub(crate) fn truncate(&mut self, len: usize) { + if len < self.len() { + self.erase_indices(len, self.entries.len()); + self.entries.truncate(len); + } + } + + pub(crate) fn drain(&mut self, range: R) -> vec::Drain<'_, Bucket> + where + R: RangeBounds, + { + let range = simplify_range(range, self.entries.len()); + self.erase_indices(range.start, range.end); + self.entries.drain(range) + } + + #[cfg(feature = "rayon")] + pub(crate) fn par_drain(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket> + where + K: Send, + V: Send, + R: RangeBounds, + { + use rayon::iter::ParallelDrainRange; + let range = simplify_range(range, self.entries.len()); + self.erase_indices(range.start, range.end); + self.entries.par_drain(range) + } + + pub(crate) fn split_off(&mut self, at: usize) -> Self { + assert!(at <= self.entries.len()); + self.erase_indices(at, self.entries.len()); + let entries = self.entries.split_off(at); + + let mut indices = Indices::with_capacity(entries.len()); + insert_bulk_no_grow(&mut indices, &entries); + Self { indices, entries } + } + + pub(crate) fn split_splice(&mut self, range: R) -> (Self, vec::IntoIter>) + where + R: RangeBounds, + { + let range = simplify_range(range, self.len()); + self.erase_indices(range.start, self.entries.len()); + let entries = self.entries.split_off(range.end); + let drained = self.entries.split_off(range.start); + + let mut indices = Indices::with_capacity(entries.len()); + insert_bulk_no_grow(&mut indices, &entries); + (Self { indices, entries }, drained.into_iter()) + } + + /// Append from another map without checking whether items already exist. + pub(crate) fn append_unchecked(&mut self, other: &mut Self) { + self.reserve(other.len()); + insert_bulk_no_grow(&mut self.indices, &other.entries); + self.entries.append(&mut other.entries); + other.indices.clear(); + } + + /// Reserve capacity for `additional` more key-value pairs. + pub(crate) fn reserve(&mut self, additional: usize) { + self.indices.reserve(additional, get_hash(&self.entries)); + // Only grow entries if necessary, since we also round up capacity. + if additional > self.entries.capacity() - self.entries.len() { + self.borrow_mut().reserve_entries(additional); + } + } + + /// Reserve capacity for `additional` more key-value pairs, without over-allocating. + pub(crate) fn reserve_exact(&mut self, additional: usize) { + self.indices.reserve(additional, get_hash(&self.entries)); + self.entries.reserve_exact(additional); + } + + /// Try to reserve capacity for `additional` more key-value pairs. 
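
These core methods appear to back the public `IndexMap` methods of the same names; the following sketch exercises those public counterparts. It is illustrative only, assumes `indexmap` 2.x, and the capacities and key ranges are arbitrary.

```rust
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<i32, i32> = IndexMap::with_capacity(8);
    map.extend((0..6).map(|i| (i, i * 10)));
    map.reserve(16); // grows both the index table and the entry vector

    // Range operations act on positions, like Vec, while keeping the
    // index table consistent.
    let tail: IndexMap<i32, i32> = map.split_off(4);
    assert_eq!(tail.keys().copied().collect::<Vec<_>>(), [4, 5]);

    let drained: Vec<(i32, i32)> = map.drain(1..3).collect();
    assert_eq!(drained, [(1, 10), (2, 20)]);
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), [0, 3]);

    map.truncate(1);
    assert_eq!(map.len(), 1);
}
```
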
+ pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.indices + .try_reserve(additional, get_hash(&self.entries)) + .map_err(TryReserveError::from_hashbrown)?; + // Only grow entries if necessary, since we also round up capacity. + if additional > self.entries.capacity() - self.entries.len() { + self.try_reserve_entries(additional) + } else { + Ok(()) + } + } + + /// Try to reserve entries capacity, rounded up to match the indices + fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> { + // Use a soft-limit on the maximum capacity, but if the caller explicitly + // requested more, do it and let them have the resulting error. + let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); + let try_add = new_capacity - self.entries.len(); + if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { + return Ok(()); + } + self.entries + .try_reserve_exact(additional) + .map_err(TryReserveError::from_alloc) + } + + /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating. + pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.indices + .try_reserve(additional, get_hash(&self.entries)) + .map_err(TryReserveError::from_hashbrown)?; + self.entries + .try_reserve_exact(additional) + .map_err(TryReserveError::from_alloc) + } + + /// Shrink the capacity of the map with a lower bound + pub(crate) fn shrink_to(&mut self, min_capacity: usize) { + self.indices + .shrink_to(min_capacity, get_hash(&self.entries)); + self.entries.shrink_to(min_capacity); + } + + /// Remove the last key-value pair + pub(crate) fn pop(&mut self) -> Option<(K, V)> { + if let Some(entry) = self.entries.pop() { + let last = self.entries.len(); + erase_index(&mut self.indices, entry.hash, last); + Some((entry.key, entry.value)) + } else { + None + } + } + + /// Return the index in `entries` where an equivalent key can be found + pub(crate) fn get_index_of(&self, hash: HashValue, key: &Q) -> Option + where + Q: ?Sized + Equivalent, + { + let eq = equivalent(key, &self.entries); + self.indices.find(hash.get(), eq).copied() + } + + pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option) + where + K: Eq, + { + let eq = equivalent(&key, &self.entries); + let hasher = get_hash(&self.entries); + match self.indices.entry(hash.get(), eq, hasher) { + hash_table::Entry::Occupied(entry) => { + let i = *entry.get(); + (i, Some(mem::replace(&mut self.entries[i].value, value))) + } + hash_table::Entry::Vacant(entry) => { + let i = self.entries.len(); + entry.insert(i); + self.borrow_mut().push_entry(hash, key, value); + debug_assert_eq!(self.indices.len(), self.entries.len()); + (i, None) + } + } + } + + /// Same as `insert_full`, except it also replaces the key + pub(crate) fn replace_full( + &mut self, + hash: HashValue, + key: K, + value: V, + ) -> (usize, Option<(K, V)>) + where + K: Eq, + { + let eq = equivalent(&key, &self.entries); + let hasher = get_hash(&self.entries); + match self.indices.entry(hash.get(), eq, hasher) { + hash_table::Entry::Occupied(entry) => { + let i = *entry.get(); + let entry = &mut self.entries[i]; + let kv = ( + mem::replace(&mut entry.key, key), + mem::replace(&mut entry.value, value), + ); + (i, Some(kv)) + } + hash_table::Entry::Vacant(entry) => { + let i = self.entries.len(); + entry.insert(i); + self.borrow_mut().push_entry(hash, key, value); + 
debug_assert_eq!(self.indices.len(), self.entries.len()); + (i, None) + } + } + } + + /// Remove an entry by shifting all entries that follow it + pub(crate) fn shift_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> + where + Q: ?Sized + Equivalent, + { + let eq = equivalent(key, &self.entries); + match self.indices.find_entry(hash.get(), eq) { + Ok(entry) => { + let (index, _) = entry.remove(); + let (key, value) = self.borrow_mut().shift_remove_finish(index); + Some((index, key, value)) + } + Err(_) => None, + } + } + + /// Remove an entry by shifting all entries that follow it + #[inline] + pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { + self.borrow_mut().shift_remove_index(index) + } + + #[inline] + pub(super) fn move_index(&mut self, from: usize, to: usize) { + self.borrow_mut().move_index(from, to); + } + + #[inline] + pub(crate) fn swap_indices(&mut self, a: usize, b: usize) { + self.borrow_mut().swap_indices(a, b); + } + + /// Remove an entry by swapping it with the last + pub(crate) fn swap_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> + where + Q: ?Sized + Equivalent, + { + let eq = equivalent(key, &self.entries); + match self.indices.find_entry(hash.get(), eq) { + Ok(entry) => { + let (index, _) = entry.remove(); + let (key, value) = self.borrow_mut().swap_remove_finish(index); + Some((index, key, value)) + } + Err(_) => None, + } + } + + /// Remove an entry by swapping it with the last + #[inline] + pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { + self.borrow_mut().swap_remove_index(index) + } + + /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..` + /// + /// All of these items should still be at their original location in `entries`. + /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`. 
+ fn erase_indices(&mut self, start: usize, end: usize) { + let (init, shifted_entries) = self.entries.split_at(end); + let (start_entries, erased_entries) = init.split_at(start); + + let erased = erased_entries.len(); + let shifted = shifted_entries.len(); + let half_capacity = self.indices.capacity() / 2; + + // Use a heuristic between different strategies + if erased == 0 { + // Degenerate case, nothing to do + } else if start + shifted < half_capacity && start < erased { + // Reinsert everything, as there are few kept indices + self.indices.clear(); + + // Reinsert stable indices, then shifted indices + insert_bulk_no_grow(&mut self.indices, start_entries); + insert_bulk_no_grow(&mut self.indices, shifted_entries); + } else if erased + shifted < half_capacity { + // Find each affected index, as there are few to adjust + + // Find erased indices + for (i, entry) in (start..).zip(erased_entries) { + erase_index(&mut self.indices, entry.hash, i); + } + + // Find shifted indices + for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) { + update_index(&mut self.indices, entry.hash, old, new); + } + } else { + // Sweep the whole table for adjustments + let offset = end - start; + self.indices.retain(move |i| { + if *i >= end { + *i -= offset; + true + } else { + *i < start + } + }); + } + + debug_assert_eq!(self.indices.len(), start + shifted); + } + + pub(crate) fn retain_in_order(&mut self, mut keep: F) + where + F: FnMut(&mut K, &mut V) -> bool, + { + self.entries + .retain_mut(|entry| keep(&mut entry.key, &mut entry.value)); + if self.entries.len() < self.indices.len() { + self.rebuild_hash_table(); + } + } + + fn rebuild_hash_table(&mut self) { + self.indices.clear(); + insert_bulk_no_grow(&mut self.indices, &self.entries); + } + + pub(crate) fn reverse(&mut self) { + self.entries.reverse(); + + // No need to save hash indices, can easily calculate what they should + // be, given that this is an in-place reversal. + let len = self.entries.len(); + for i in &mut self.indices { + *i = len - *i - 1; + } + } +} + +impl<'a, K, V> RefMut<'a, K, V> { + #[inline] + fn new(indices: &'a mut Indices, entries: &'a mut Entries) -> Self { + Self { indices, entries } + } + + /// Reserve entries capacity, rounded up to match the indices + fn reserve_entries(&mut self, additional: usize) { + // Use a soft-limit on the maximum capacity, but if the caller explicitly + // requested more, do it and let them have the resulting panic. + let new_capacity = Ord::min( + self.indices.capacity(), + IndexMapCore::::MAX_ENTRIES_CAPACITY, + ); + let try_add = new_capacity - self.entries.len(); + if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { + return; + } + self.entries.reserve_exact(additional); + } + + /// Append a key-value pair to `entries`, + /// *without* checking whether it already exists. + fn push_entry(&mut self, hash: HashValue, key: K, value: V) { + if self.entries.len() == self.entries.capacity() { + // Reserve our own capacity synced to the indices, + // rather than letting `Vec::push` just double it. + self.reserve_entries(1); + } + self.entries.push(Bucket { hash, key, value }); + } + + /// Insert a key-value pair in `entries` at a particular index, + /// *without* checking whether it already exists. + fn insert_entry(&mut self, index: usize, hash: HashValue, key: K, value: V) { + if self.entries.len() == self.entries.capacity() { + // Reserve our own capacity synced to the indices, + // rather than letting `Vec::insert` just double it. 
+ self.reserve_entries(1); + } + self.entries.insert(index, Bucket { hash, key, value }); + } + + fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> usize { + let i = self.indices.len(); + self.indices + .insert_unique(hash.get(), i, get_hash(self.entries)); + debug_assert_eq!(i, self.entries.len()); + self.push_entry(hash, key, value); + i + } + + fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) { + let end = self.indices.len(); + assert!(index <= end); + // Increment others first so we don't have duplicate indices. + self.increment_indices(index, end); + let entries = &*self.entries; + self.indices.insert_unique(hash.get(), index, move |&i| { + // Adjust for the incremented indices to find hashes. + debug_assert_ne!(i, index); + let i = if i < index { i } else { i - 1 }; + entries[i].hash.get() + }); + self.insert_entry(index, hash, key, value); + } + + /// Remove an entry by shifting all entries that follow it + fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { + match self.entries.get(index) { + Some(entry) => { + erase_index(self.indices, entry.hash, index); + Some(self.shift_remove_finish(index)) + } + None => None, + } + } + + /// Remove an entry by shifting all entries that follow it + /// + /// The index should already be removed from `self.indices`. + fn shift_remove_finish(&mut self, index: usize) -> (K, V) { + // Correct indices that point to the entries that followed the removed entry. + self.decrement_indices(index + 1, self.entries.len()); + + // Use Vec::remove to actually remove the entry. + let entry = self.entries.remove(index); + (entry.key, entry.value) + } + + /// Remove an entry by swapping it with the last + fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { + match self.entries.get(index) { + Some(entry) => { + erase_index(self.indices, entry.hash, index); + Some(self.swap_remove_finish(index)) + } + None => None, + } + } + + /// Finish removing an entry by swapping it with the last + /// + /// The index should already be removed from `self.indices`. + fn swap_remove_finish(&mut self, index: usize) -> (K, V) { + // use swap_remove, but then we need to update the index that points + // to the other entry that has to move + let entry = self.entries.swap_remove(index); + + // correct index that points to the entry that had to swap places + if let Some(entry) = self.entries.get(index) { + // was not last element + // examine new element in `index` and find it in indices + let last = self.entries.len(); + update_index(self.indices, entry.hash, last, index); + } + + (entry.key, entry.value) + } + + /// Decrement all indices in the range `start..end`. + /// + /// The index `start - 1` should not exist in `self.indices`. + /// All entries should still be in their original positions. + fn decrement_indices(&mut self, start: usize, end: usize) { + // Use a heuristic between a full sweep vs. a `find()` for every shifted item. + let shifted_entries = &self.entries[start..end]; + if shifted_entries.len() > self.indices.capacity() / 2 { + // Shift all indices in range. + for i in &mut *self.indices { + if start <= *i && *i < end { + *i -= 1; + } + } + } else { + // Find each entry in range to shift its index. + for (i, entry) in (start..end).zip(shifted_entries) { + update_index(self.indices, entry.hash, i, i - 1); + } + } + } + + /// Increment all indices in the range `start..end`. + /// + /// The index `end` should not exist in `self.indices`. 
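
The decrement/increment helpers above keep the table's stored positions in sync when entries shift. The user-visible effect, sketched here with the public API (assumes `indexmap` 2.x, toy keys), is that every later entry changes index after a shift removal, while a swap removal only renumbers the moved last entry.

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::from([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);
    assert_eq!(map.get_index_of("d"), Some(3));

    // Shift removal decrements the stored index of every later entry...
    map.shift_remove("b");
    assert_eq!(map.get_index_of("c"), Some(1));
    assert_eq!(map.get_index_of("d"), Some(2));

    // ...while swap removal only rewrites the index of the moved last entry.
    map.swap_remove("a");
    assert_eq!(map.get_index_of("d"), Some(0));
}
```
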
+ /// All entries should still be in their original positions. + fn increment_indices(&mut self, start: usize, end: usize) { + // Use a heuristic between a full sweep vs. a `find()` for every shifted item. + let shifted_entries = &self.entries[start..end]; + if shifted_entries.len() > self.indices.capacity() / 2 { + // Shift all indices in range. + for i in &mut *self.indices { + if start <= *i && *i < end { + *i += 1; + } + } + } else { + // Find each entry in range to shift its index, updated in reverse so + // we never have duplicated indices that might have a hash collision. + for (i, entry) in (start..end).zip(shifted_entries).rev() { + update_index(self.indices, entry.hash, i, i + 1); + } + } + } + + fn move_index(&mut self, from: usize, to: usize) { + let from_hash = self.entries[from].hash; + let _ = self.entries[to]; // explicit bounds check + if from != to { + // Use a sentinel index so other indices don't collide. + update_index(self.indices, from_hash, from, usize::MAX); + + // Update all other indices and rotate the entry positions. + if from < to { + self.decrement_indices(from + 1, to + 1); + self.entries[from..=to].rotate_left(1); + } else if to < from { + self.increment_indices(to, from); + self.entries[to..=from].rotate_right(1); + } + + // Change the sentinel index to its final position. + update_index(self.indices, from_hash, usize::MAX, to); + } + } + + fn swap_indices(&mut self, a: usize, b: usize) { + // If they're equal and in-bounds, there's nothing to do. + if a == b && a < self.entries.len() { + return; + } + + // We'll get a "nice" bounds-check from indexing `entries`, + // and then we expect to find it in the table as well. + match self.indices.get_many_mut( + [self.entries[a].hash.get(), self.entries[b].hash.get()], + move |i, &x| if i == 0 { x == a } else { x == b }, + ) { + [Some(ref_a), Some(ref_b)] => { + mem::swap(ref_a, ref_b); + self.entries.swap(a, b); + } + _ => panic!("indices not found"), + } + } +} + +#[test] +fn assert_send_sync() { + fn assert_send_sync() {} + assert_send_sync::>(); + assert_send_sync::>(); + assert_send_sync::>(); + assert_send_sync::>(); +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core/entry.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core/entry.rs new file mode 100644 index 000000000000..f8a813673604 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core/entry.rs @@ -0,0 +1,535 @@ +use super::{equivalent, Entries, IndexMapCore, RefMut}; +use crate::HashValue; +use core::{fmt, mem}; +use hashbrown::hash_table; + +impl IndexMapCore { + pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V> + where + K: Eq, + { + let entries = &mut self.entries; + let eq = equivalent(&key, entries); + match self.indices.find_entry(hash.get(), eq) { + Ok(index) => Entry::Occupied(OccupiedEntry { entries, index }), + Err(absent) => Entry::Vacant(VacantEntry { + map: RefMut::new(absent.into_table(), entries), + hash, + key, + }), + } + } +} + +/// Entry for an existing key-value pair in an [`IndexMap`][crate::IndexMap] +/// or a vacant location to insert one. +pub enum Entry<'a, K, V> { + /// Existing slot with equivalent key. + Occupied(OccupiedEntry<'a, K, V>), + /// Vacant slot (no equivalent key in the map). + Vacant(VacantEntry<'a, K, V>), +} + +impl<'a, K, V> Entry<'a, K, V> { + /// Return the index where the key-value pair exists or will be inserted. 
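
The `Entry` API below mirrors `HashMap`'s entry API while also exposing positions. A small usage sketch, not part of the patch (assumes `indexmap` 2.x; the word-count example is invented):

```rust
use indexmap::map::Entry;
use indexmap::IndexMap;

fn main() {
    let mut counts: IndexMap<&str, u32> = IndexMap::new();

    for word in ["apple", "pear", "apple"] {
        // One hash lookup serves both the "insert" and the "update" path.
        *counts.entry(word).or_insert(0) += 1;
    }
    assert_eq!(counts["apple"], 2);

    // and_modify / or_default compose the same way as with HashMap's entries.
    counts.entry("pear").and_modify(|c| *c += 10).or_default();
    assert_eq!(counts["pear"], 11);

    // Entries also report the position they occupy (or would occupy).
    match counts.entry("cherry") {
        Entry::Vacant(e) => assert_eq!(e.index(), 2),
        Entry::Occupied(_) => unreachable!(),
    }
}
```
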
+ pub fn index(&self) -> usize { + match *self { + Entry::Occupied(ref entry) => entry.index(), + Entry::Vacant(ref entry) => entry.index(), + } + } + + /// Inserts the given default value in the entry if it is vacant and returns a mutable + /// reference to it. Otherwise a mutable reference to an already existent value is returned. + /// + /// Computes in **O(1)** time (amortized average). + pub fn or_insert(self, default: V) -> &'a mut V { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(default), + } + } + + /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable + /// reference to it. Otherwise a mutable reference to an already existent value is returned. + /// + /// Computes in **O(1)** time (amortized average). + pub fn or_insert_with(self, call: F) -> &'a mut V + where + F: FnOnce() -> V, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(call()), + } + } + + /// Inserts the result of the `call` function with a reference to the entry's key if it is + /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to + /// an already existent value is returned. + /// + /// Computes in **O(1)** time (amortized average). + pub fn or_insert_with_key(self, call: F) -> &'a mut V + where + F: FnOnce(&K) -> V, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => { + let value = call(&entry.key); + entry.insert(value) + } + } + } + + /// Gets a reference to the entry's key, either within the map if occupied, + /// or else the new key that was used to find the entry. + pub fn key(&self) -> &K { + match *self { + Entry::Occupied(ref entry) => entry.key(), + Entry::Vacant(ref entry) => entry.key(), + } + } + + /// Modifies the entry if it is occupied. + pub fn and_modify(mut self, f: F) -> Self + where + F: FnOnce(&mut V), + { + if let Entry::Occupied(entry) = &mut self { + f(entry.get_mut()); + } + self + } + + /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable + /// reference to it. Otherwise a mutable reference to an already existent value is returned. + /// + /// Computes in **O(1)** time (amortized average). + pub fn or_default(self) -> &'a mut V + where + V: Default, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(V::default()), + } + } +} + +impl fmt::Debug for Entry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut tuple = f.debug_tuple("Entry"); + match self { + Entry::Vacant(v) => tuple.field(v), + Entry::Occupied(o) => tuple.field(o), + }; + tuple.finish() + } +} + +/// A view into an occupied entry in an [`IndexMap`][crate::IndexMap]. +/// It is part of the [`Entry`] enum. +pub struct OccupiedEntry<'a, K, V> { + entries: &'a mut Entries, + index: hash_table::OccupiedEntry<'a, usize>, +} + +impl<'a, K, V> OccupiedEntry<'a, K, V> { + /// Return the index of the key-value pair + #[inline] + pub fn index(&self) -> usize { + *self.index.get() + } + + #[inline] + fn into_ref_mut(self) -> RefMut<'a, K, V> { + RefMut::new(self.index.into_table(), self.entries) + } + + /// Gets a reference to the entry's key in the map. + /// + /// Note that this is not the key that was used to find the entry. 
There may be an observable + /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like + /// extra fields or the memory address of an allocation. + pub fn key(&self) -> &K { + &self.entries[self.index()].key + } + + pub(crate) fn key_mut(&mut self) -> &mut K { + let index = self.index(); + &mut self.entries[index].key + } + + /// Gets a reference to the entry's value in the map. + pub fn get(&self) -> &V { + &self.entries[self.index()].value + } + + /// Gets a mutable reference to the entry's value in the map. + /// + /// If you need a reference which may outlive the destruction of the + /// [`Entry`] value, see [`into_mut`][Self::into_mut]. + pub fn get_mut(&mut self) -> &mut V { + let index = self.index(); + &mut self.entries[index].value + } + + /// Converts into a mutable reference to the entry's value in the map, + /// with a lifetime bound to the map itself. + pub fn into_mut(self) -> &'a mut V { + let index = self.index(); + &mut self.entries[index].value + } + + /// Sets the value of the entry to `value`, and returns the entry's old value. + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + /// Remove the key, value pair stored in the map for this entry, and return the value. + /// + /// **NOTE:** This is equivalent to [`.swap_remove()`][Self::swap_remove], replacing this + /// entry's position with the last element, and it is deprecated in favor of calling that + /// explicitly. If you need to preserve the relative order of the keys in the map, use + /// [`.shift_remove()`][Self::shift_remove] instead. + #[deprecated(note = "`remove` disrupts the map order -- \ + use `swap_remove` or `shift_remove` for explicit behavior.")] + pub fn remove(self) -> V { + self.swap_remove() + } + + /// Remove the key, value pair stored in the map for this entry, and return the value. + /// + /// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with + /// the last element of the map and popping it off. + /// **This perturbs the position of what used to be the last element!** + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove(self) -> V { + self.swap_remove_entry().1 + } + + /// Remove the key, value pair stored in the map for this entry, and return the value. + /// + /// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove(self) -> V { + self.shift_remove_entry().1 + } + + /// Remove and return the key, value pair stored in the map for this entry + /// + /// **NOTE:** This is equivalent to [`.swap_remove_entry()`][Self::swap_remove_entry], + /// replacing this entry's position with the last element, and it is deprecated in favor of + /// calling that explicitly. If you need to preserve the relative order of the keys in the map, + /// use [`.shift_remove_entry()`][Self::shift_remove_entry] instead. + #[deprecated(note = "`remove_entry` disrupts the map order -- \ + use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")] + pub fn remove_entry(self) -> (K, V) { + self.swap_remove_entry() + } + + /// Remove and return the key, value pair stored in the map for this entry + /// + /// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with + /// the last element of the map and popping it off. 
+ /// **This perturbs the position of what used to be the last element!** + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove_entry(self) -> (K, V) { + let (index, entry) = self.index.remove(); + RefMut::new(entry.into_table(), self.entries).swap_remove_finish(index) + } + + /// Remove and return the key, value pair stored in the map for this entry + /// + /// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_entry(self) -> (K, V) { + let (index, entry) = self.index.remove(); + RefMut::new(entry.into_table(), self.entries).shift_remove_finish(index) + } + + /// Moves the position of the entry to a new index + /// by shifting all other entries in-between. + /// + /// This is equivalent to [`IndexMap::move_index`][`crate::IndexMap::move_index`] + /// coming `from` the current [`.index()`][Self::index]. + /// + /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up. + /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down. + /// + /// ***Panics*** if `to` is out of bounds. + /// + /// Computes in **O(n)** time (average). + pub fn move_index(self, to: usize) { + let index = self.index(); + self.into_ref_mut().move_index(index, to); + } + + /// Swaps the position of entry with another. + /// + /// This is equivalent to [`IndexMap::swap_indices`][`crate::IndexMap::swap_indices`] + /// with the current [`.index()`][Self::index] as one of the two being swapped. + /// + /// ***Panics*** if the `other` index is out of bounds. + /// + /// Computes in **O(1)** time (average). + pub fn swap_indices(self, other: usize) { + let index = self.index(); + self.into_ref_mut().swap_indices(index, other); + } +} + +impl fmt::Debug for OccupiedEntry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +impl<'a, K, V> From> for OccupiedEntry<'a, K, V> { + fn from(other: IndexedEntry<'a, K, V>) -> Self { + let IndexedEntry { + map: RefMut { indices, entries }, + index, + } = other; + let hash = entries[index].hash; + Self { + entries, + index: indices + .find_entry(hash.get(), move |&i| i == index) + .expect("index not found"), + } + } +} + +/// A view into a vacant entry in an [`IndexMap`][crate::IndexMap]. +/// It is part of the [`Entry`] enum. +pub struct VacantEntry<'a, K, V> { + map: RefMut<'a, K, V>, + hash: HashValue, + key: K, +} + +impl<'a, K, V> VacantEntry<'a, K, V> { + /// Return the index where a key-value pair may be inserted. + pub fn index(&self) -> usize { + self.map.indices.len() + } + + /// Gets a reference to the key that was used to find the entry. + pub fn key(&self) -> &K { + &self.key + } + + pub(crate) fn key_mut(&mut self) -> &mut K { + &mut self.key + } + + /// Takes ownership of the key, leaving the entry vacant. + pub fn into_key(self) -> K { + self.key + } + + /// Inserts the entry's key and the given value into the map, and returns a mutable reference + /// to the value. 
+ pub fn insert(mut self, value: V) -> &'a mut V { + let i = self.map.insert_unique(self.hash, self.key, value); + &mut self.map.entries[i].value + } + + /// Inserts the entry's key and the given value into the map at its ordered + /// position among sorted keys, and returns the new index and a mutable + /// reference to the value. + /// + /// If the existing keys are **not** already sorted, then the insertion + /// index is unspecified (like [`slice::binary_search`]), but the key-value + /// pair is inserted at that position regardless. + /// + /// Computes in **O(n)** time (average). + pub fn insert_sorted(self, value: V) -> (usize, &'a mut V) + where + K: Ord, + { + let slice = crate::map::Slice::from_slice(self.map.entries); + let i = slice.binary_search_keys(&self.key).unwrap_err(); + (i, self.shift_insert(i, value)) + } + + /// Inserts the entry's key and the given value into the map at the given index, + /// shifting others to the right, and returns a mutable reference to the value. + /// + /// ***Panics*** if `index` is out of bounds. + /// + /// Computes in **O(n)** time (average). + pub fn shift_insert(mut self, index: usize, value: V) -> &'a mut V { + self.map + .shift_insert_unique(index, self.hash, self.key, value); + &mut self.map.entries[index].value + } +} + +impl fmt::Debug for VacantEntry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("VacantEntry").field(self.key()).finish() + } +} + +/// A view into an occupied entry in an [`IndexMap`][crate::IndexMap] obtained by index. +/// +/// This `struct` is created from the [`get_index_entry`][crate::IndexMap::get_index_entry] method. +pub struct IndexedEntry<'a, K, V> { + map: RefMut<'a, K, V>, + // We have a mutable reference to the map, which keeps the index + // valid and pointing to the correct entry. + index: usize, +} + +impl<'a, K, V> IndexedEntry<'a, K, V> { + pub(crate) fn new(map: &'a mut IndexMapCore, index: usize) -> Self { + Self { + map: map.borrow_mut(), + index, + } + } + + /// Return the index of the key-value pair + #[inline] + pub fn index(&self) -> usize { + self.index + } + + /// Gets a reference to the entry's key in the map. + pub fn key(&self) -> &K { + &self.map.entries[self.index].key + } + + pub(crate) fn key_mut(&mut self) -> &mut K { + &mut self.map.entries[self.index].key + } + + /// Gets a reference to the entry's value in the map. + pub fn get(&self) -> &V { + &self.map.entries[self.index].value + } + + /// Gets a mutable reference to the entry's value in the map. + /// + /// If you need a reference which may outlive the destruction of the + /// `IndexedEntry` value, see [`into_mut`][Self::into_mut]. + pub fn get_mut(&mut self) -> &mut V { + &mut self.map.entries[self.index].value + } + + /// Sets the value of the entry to `value`, and returns the entry's old value. + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + /// Converts into a mutable reference to the entry's value in the map, + /// with a lifetime bound to the map itself. + pub fn into_mut(self) -> &'a mut V { + &mut self.map.entries[self.index].value + } + + /// Remove and return the key, value pair stored in the map for this entry + /// + /// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with + /// the last element of the map and popping it off. + /// **This perturbs the position of what used to be the last element!** + /// + /// Computes in **O(1)** time (average). 
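
A short usage sketch for the positional `IndexedEntry` view and the sorted insertion helper defined above (outside the patch; `indexmap` 2.x assumed, toy numeric keys):

```rust
use indexmap::map::Entry;
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::from([(1, "a"), (3, "c"), (4, "d")]);

    // Positional counterpart of `entry`: edit whatever currently sits at index 1.
    if let Some(mut e) = map.get_index_entry(1) {
        assert_eq!(*e.key(), 3);
        *e.get_mut() = "C";
    }
    assert_eq!(map[1], "C");

    // A vacant entry can be placed at its sorted position among sorted keys.
    if let Entry::Vacant(e) = map.entry(2) {
        let (index, _value) = e.insert_sorted("b");
        assert_eq!(index, 1);
    }
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), [1, 2, 3, 4]);
}
```
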
+ pub fn swap_remove_entry(mut self) -> (K, V) { + self.map.swap_remove_index(self.index).unwrap() + } + + /// Remove and return the key, value pair stored in the map for this entry + /// + /// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_entry(mut self) -> (K, V) { + self.map.shift_remove_index(self.index).unwrap() + } + + /// Remove the key, value pair stored in the map for this entry, and return the value. + /// + /// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with + /// the last element of the map and popping it off. + /// **This perturbs the position of what used to be the last element!** + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove(self) -> V { + self.swap_remove_entry().1 + } + + /// Remove the key, value pair stored in the map for this entry, and return the value. + /// + /// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove(self) -> V { + self.shift_remove_entry().1 + } + + /// Moves the position of the entry to a new index + /// by shifting all other entries in-between. + /// + /// This is equivalent to [`IndexMap::move_index`][`crate::IndexMap::move_index`] + /// coming `from` the current [`.index()`][Self::index]. + /// + /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up. + /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down. + /// + /// ***Panics*** if `to` is out of bounds. + /// + /// Computes in **O(n)** time (average). + pub fn move_index(mut self, to: usize) { + self.map.move_index(self.index, to); + } + + /// Swaps the position of entry with another. + /// + /// This is equivalent to [`IndexMap::swap_indices`][`crate::IndexMap::swap_indices`] + /// with the current [`.index()`][Self::index] as one of the two being swapped. + /// + /// ***Panics*** if the `other` index is out of bounds. + /// + /// Computes in **O(1)** time (average). + pub fn swap_indices(mut self, other: usize) { + self.map.swap_indices(self.index, other); + } +} + +impl fmt::Debug for IndexedEntry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("IndexedEntry") + .field("index", &self.index) + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +impl<'a, K, V> From> for IndexedEntry<'a, K, V> { + fn from(other: OccupiedEntry<'a, K, V>) -> Self { + Self { + index: other.index(), + map: other.into_ref_mut(), + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core/raw_entry_v1.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core/raw_entry_v1.rs new file mode 100644 index 000000000000..5d73469d7cac --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/core/raw_entry_v1.rs @@ -0,0 +1,666 @@ +//! Opt-in access to the experimental raw entry API. +//! +//! This module is designed to mimic the raw entry API of [`HashMap`][std::collections::hash_map], +//! matching its unstable state as of Rust 1.75. See the tracking issue +//! 
[rust#56167](https://github.com/rust-lang/rust/issues/56167) for more details. +//! +//! The trait [`RawEntryApiV1`] and the `_v1` suffix on its methods are meant to insulate this for +//! the future, in case later breaking changes are needed. If the standard library stabilizes its +//! `hash_raw_entry` feature (or some replacement), matching *inherent* methods will be added to +//! `IndexMap` without such an opt-in trait. + +use super::{Entries, RefMut}; +use crate::{Equivalent, HashValue, IndexMap}; +use core::fmt; +use core::hash::{BuildHasher, Hash, Hasher}; +use core::marker::PhantomData; +use core::mem; +use hashbrown::hash_table; + +/// Opt-in access to the experimental raw entry API. +/// +/// See the [`raw_entry_v1`][self] module documentation for more information. +pub trait RawEntryApiV1: private::Sealed { + /// Creates a raw immutable entry builder for the [`IndexMap`]. + /// + /// Raw entries provide the lowest level of control for searching and + /// manipulating a map. They must be manually initialized with a hash and + /// then manually searched. + /// + /// This is useful for + /// * Hash memoization + /// * Using a search key that doesn't work with the [`Equivalent`] trait + /// * Using custom comparison logic without newtype wrappers + /// + /// Unless you are in such a situation, higher-level and more foolproof APIs like + /// [`get`][IndexMap::get] should be preferred. + /// + /// Immutable raw entries have very limited use; you might instead want + /// [`raw_entry_mut_v1`][Self::raw_entry_mut_v1]. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use indexmap::map::{IndexMap, RawEntryApiV1}; + /// + /// let mut map = IndexMap::new(); + /// map.extend([("a", 100), ("b", 200), ("c", 300)]); + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// for k in ["a", "b", "c", "d", "e", "f"] { + /// let hash = compute_hash(map.hasher(), k); + /// let i = map.get_index_of(k); + /// let v = map.get(k); + /// let kv = map.get_key_value(k); + /// let ikv = map.get_full(k); + /// + /// println!("Key: {} and value: {:?}", k, v); + /// + /// assert_eq!(map.raw_entry_v1().from_key(k), kv); + /// assert_eq!(map.raw_entry_v1().from_hash(hash, |q| *q == k), kv); + /// assert_eq!(map.raw_entry_v1().from_key_hashed_nocheck(hash, k), kv); + /// assert_eq!(map.raw_entry_v1().from_hash_full(hash, |q| *q == k), ikv); + /// assert_eq!(map.raw_entry_v1().index_from_hash(hash, |q| *q == k), i); + /// } + /// ``` + fn raw_entry_v1(&self) -> RawEntryBuilder<'_, K, V, S>; + + /// Creates a raw entry builder for the [`IndexMap`]. + /// + /// Raw entries provide the lowest level of control for searching and + /// manipulating a map. They must be manually initialized with a hash and + /// then manually searched. After this, insertions into a vacant entry + /// still require an owned key to be provided. 
+ /// + /// Raw entries are useful for such exotic situations as: + /// + /// * Hash memoization + /// * Deferring the creation of an owned key until it is known to be required + /// * Using a search key that doesn't work with the [`Equivalent`] trait + /// * Using custom comparison logic without newtype wrappers + /// + /// Because raw entries provide much more low-level control, it's much easier + /// to put the `IndexMap` into an inconsistent state which, while memory-safe, + /// will cause the map to produce seemingly random results. Higher-level and more + /// foolproof APIs like [`entry`][IndexMap::entry] should be preferred when possible. + /// + /// Raw entries give mutable access to the keys. This must not be used + /// to modify how the key would compare or hash, as the map will not re-evaluate + /// where the key should go, meaning the keys may become "lost" if their + /// location does not reflect their state. For instance, if you change a key + /// so that the map now contains keys which compare equal, search may start + /// acting erratically, with two keys randomly masking each other. Implementations + /// are free to assume this doesn't happen (within the limits of memory-safety). + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use indexmap::map::{IndexMap, RawEntryApiV1}; + /// use indexmap::map::raw_entry_v1::RawEntryMut; + /// + /// let mut map = IndexMap::new(); + /// map.extend([("a", 100), ("b", 200), ("c", 300)]); + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// // Existing key (insert and update) + /// match map.raw_entry_mut_v1().from_key("a") { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(mut view) => { + /// assert_eq!(view.index(), 0); + /// assert_eq!(view.get(), &100); + /// let v = view.get_mut(); + /// let new_v = (*v) * 10; + /// *v = new_v; + /// assert_eq!(view.insert(1111), 1000); + /// } + /// } + /// + /// assert_eq!(map["a"], 1111); + /// assert_eq!(map.len(), 3); + /// + /// // Existing key (take) + /// let hash = compute_hash(map.hasher(), "c"); + /// match map.raw_entry_mut_v1().from_key_hashed_nocheck(hash, "c") { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(view) => { + /// assert_eq!(view.index(), 2); + /// assert_eq!(view.shift_remove_entry(), ("c", 300)); + /// } + /// } + /// assert_eq!(map.raw_entry_v1().from_key("c"), None); + /// assert_eq!(map.len(), 2); + /// + /// // Nonexistent key (insert and update) + /// let key = "d"; + /// let hash = compute_hash(map.hasher(), key); + /// match map.raw_entry_mut_v1().from_hash(hash, |q| *q == key) { + /// RawEntryMut::Occupied(_) => unreachable!(), + /// RawEntryMut::Vacant(view) => { + /// assert_eq!(view.index(), 2); + /// let (k, value) = view.insert("d", 4000); + /// assert_eq!((*k, *value), ("d", 4000)); + /// *value = 40000; + /// } + /// } + /// assert_eq!(map["d"], 40000); + /// assert_eq!(map.len(), 3); + /// + /// match map.raw_entry_mut_v1().from_hash(hash, |q| *q == key) { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(view) => { + /// assert_eq!(view.index(), 2); + /// assert_eq!(view.swap_remove_entry(), ("d", 40000)); + /// } + /// } + /// assert_eq!(map.get("d"), None); + /// assert_eq!(map.len(), 2); + /// ``` + fn raw_entry_mut_v1(&mut self) -> RawEntryBuilderMut<'_, K, V, S>; +} + +impl 
RawEntryApiV1 for IndexMap { + fn raw_entry_v1(&self) -> RawEntryBuilder<'_, K, V, S> { + RawEntryBuilder { map: self } + } + + fn raw_entry_mut_v1(&mut self) -> RawEntryBuilderMut<'_, K, V, S> { + RawEntryBuilderMut { map: self } + } +} + +/// A builder for computing where in an [`IndexMap`] a key-value pair would be stored. +/// +/// This `struct` is created by the [`IndexMap::raw_entry_v1`] method, provided by the +/// [`RawEntryApiV1`] trait. See its documentation for more. +pub struct RawEntryBuilder<'a, K, V, S> { + map: &'a IndexMap, +} + +impl fmt::Debug for RawEntryBuilder<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawEntryBuilder").finish_non_exhaustive() + } +} + +impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S> { + /// Access an entry by key. + pub fn from_key(self, key: &Q) -> Option<(&'a K, &'a V)> + where + S: BuildHasher, + Q: ?Sized + Hash + Equivalent, + { + self.map.get_key_value(key) + } + + /// Access an entry by a key and its hash. + pub fn from_key_hashed_nocheck(self, hash: u64, key: &Q) -> Option<(&'a K, &'a V)> + where + Q: ?Sized + Equivalent, + { + let hash = HashValue(hash as usize); + let i = self.map.core.get_index_of(hash, key)?; + self.map.get_index(i) + } + + /// Access an entry by hash. + pub fn from_hash(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)> + where + F: FnMut(&K) -> bool, + { + let map = self.map; + let i = self.index_from_hash(hash, is_match)?; + map.get_index(i) + } + + /// Access an entry by hash, including its index. + pub fn from_hash_full(self, hash: u64, is_match: F) -> Option<(usize, &'a K, &'a V)> + where + F: FnMut(&K) -> bool, + { + let map = self.map; + let i = self.index_from_hash(hash, is_match)?; + let (key, value) = map.get_index(i)?; + Some((i, key, value)) + } + + /// Access the index of an entry by hash. + pub fn index_from_hash(self, hash: u64, mut is_match: F) -> Option + where + F: FnMut(&K) -> bool, + { + let hash = HashValue(hash as usize); + let entries = &*self.map.core.entries; + let eq = move |&i: &usize| is_match(&entries[i].key); + self.map.core.indices.find(hash.get(), eq).copied() + } +} + +/// A builder for computing where in an [`IndexMap`] a key-value pair would be stored. +/// +/// This `struct` is created by the [`IndexMap::raw_entry_mut_v1`] method, provided by the +/// [`RawEntryApiV1`] trait. See its documentation for more. +pub struct RawEntryBuilderMut<'a, K, V, S> { + map: &'a mut IndexMap, +} + +impl fmt::Debug for RawEntryBuilderMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawEntryBuilderMut").finish_non_exhaustive() + } +} + +impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S> { + /// Access an entry by key. + pub fn from_key(self, key: &Q) -> RawEntryMut<'a, K, V, S> + where + S: BuildHasher, + Q: ?Sized + Hash + Equivalent, + { + let hash = self.map.hash(key); + self.from_key_hashed_nocheck(hash.get(), key) + } + + /// Access an entry by a key and its hash. + pub fn from_key_hashed_nocheck(self, hash: u64, key: &Q) -> RawEntryMut<'a, K, V, S> + where + Q: ?Sized + Equivalent, + { + self.from_hash(hash, |k| Q::equivalent(key, k)) + } + + /// Access an entry by hash. 
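The "deferring the creation of an owned key" motivation listed above can be sketched roughly as follows: the map is keyed by `String`, lookups go through `&str`, and an owned key is only allocated when the slot turns out to be vacant. This assumes the `RawEntryApiV1` trait and the `raw_entry_v1::RawEntryMut` path shown in the doc examples:

```rust
use indexmap::map::raw_entry_v1::RawEntryMut;
use indexmap::map::{IndexMap, RawEntryApiV1};

fn main() {
    let mut counts: IndexMap<String, u32> = IndexMap::new();

    for word in "the quick brown fox jumps over the lazy dog the end".split_whitespace() {
        // Look up by `&str`; only allocate a `String` key when the slot is vacant.
        match counts.raw_entry_mut_v1().from_key(word) {
            RawEntryMut::Occupied(mut entry) => *entry.get_mut() += 1,
            RawEntryMut::Vacant(entry) => {
                entry.insert(word.to_owned(), 1);
            }
        }
    }

    assert_eq!(counts["the"], 3);
    // The first occurrence of each word keeps its insertion position.
    assert_eq!(counts.get_index(0), Some((&"the".to_owned(), &3)));
}
```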
+ pub fn from_hash(self, hash: u64, mut is_match: F) -> RawEntryMut<'a, K, V, S> + where + F: FnMut(&K) -> bool, + { + let ref_entries = &*self.map.core.entries; + let eq = move |&i: &usize| is_match(&ref_entries[i].key); + match self.map.core.indices.find_entry(hash, eq) { + Ok(index) => RawEntryMut::Occupied(RawOccupiedEntryMut { + entries: &mut self.map.core.entries, + index, + hash_builder: PhantomData, + }), + Err(absent) => RawEntryMut::Vacant(RawVacantEntryMut { + map: RefMut::new(absent.into_table(), &mut self.map.core.entries), + hash_builder: &self.map.hash_builder, + }), + } + } +} + +/// Raw entry for an existing key-value pair or a vacant location to +/// insert one. +pub enum RawEntryMut<'a, K, V, S> { + /// Existing slot with equivalent key. + Occupied(RawOccupiedEntryMut<'a, K, V, S>), + /// Vacant slot (no equivalent key in the map). + Vacant(RawVacantEntryMut<'a, K, V, S>), +} + +impl fmt::Debug for RawEntryMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut tuple = f.debug_tuple("RawEntryMut"); + match self { + Self::Vacant(v) => tuple.field(v), + Self::Occupied(o) => tuple.field(o), + }; + tuple.finish() + } +} + +impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { + /// Return the index where the key-value pair exists or may be inserted. + #[inline] + pub fn index(&self) -> usize { + match self { + Self::Occupied(entry) => entry.index(), + Self::Vacant(entry) => entry.index(), + } + } + + /// Inserts the given default key and value in the entry if it is vacant and returns mutable + /// references to them. Otherwise mutable references to an already existent pair are returned. + pub fn or_insert(self, default_key: K, default_value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + match self { + Self::Occupied(entry) => entry.into_key_value_mut(), + Self::Vacant(entry) => entry.insert(default_key, default_value), + } + } + + /// Inserts the result of the `call` function in the entry if it is vacant and returns mutable + /// references to them. Otherwise mutable references to an already existent pair are returned. + pub fn or_insert_with(self, call: F) -> (&'a mut K, &'a mut V) + where + F: FnOnce() -> (K, V), + K: Hash, + S: BuildHasher, + { + match self { + Self::Occupied(entry) => entry.into_key_value_mut(), + Self::Vacant(entry) => { + let (key, value) = call(); + entry.insert(key, value) + } + } + } + + /// Modifies the entry if it is occupied. + pub fn and_modify(mut self, f: F) -> Self + where + F: FnOnce(&mut K, &mut V), + { + if let Self::Occupied(entry) = &mut self { + let (k, v) = entry.get_key_value_mut(); + f(k, v); + } + self + } +} + +/// A raw view into an occupied entry in an [`IndexMap`]. +/// It is part of the [`RawEntryMut`] enum. +pub struct RawOccupiedEntryMut<'a, K, V, S> { + entries: &'a mut Entries, + index: hash_table::OccupiedEntry<'a, usize>, + hash_builder: PhantomData<&'a S>, +} + +impl fmt::Debug for RawOccupiedEntryMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawOccupiedEntryMut") + .field("key", self.key()) + .field("value", self.get()) + .finish_non_exhaustive() + } +} + +impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { + /// Return the index of the key-value pair + #[inline] + pub fn index(&self) -> usize { + *self.index.get() + } + + #[inline] + fn into_ref_mut(self) -> RefMut<'a, K, V> { + RefMut::new(self.index.into_table(), self.entries) + } + + /// Gets a reference to the entry's key in the map. 
+ /// + /// Note that this is not the key that was used to find the entry. There may be an observable + /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like + /// extra fields or the memory address of an allocation. + pub fn key(&self) -> &K { + &self.entries[self.index()].key + } + + /// Gets a mutable reference to the entry's key in the map. + /// + /// Note that this is not the key that was used to find the entry. There may be an observable + /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like + /// extra fields or the memory address of an allocation. + pub fn key_mut(&mut self) -> &mut K { + let index = self.index(); + &mut self.entries[index].key + } + + /// Converts into a mutable reference to the entry's key in the map, + /// with a lifetime bound to the map itself. + /// + /// Note that this is not the key that was used to find the entry. There may be an observable + /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like + /// extra fields or the memory address of an allocation. + pub fn into_key(self) -> &'a mut K { + let index = self.index(); + &mut self.entries[index].key + } + + /// Gets a reference to the entry's value in the map. + pub fn get(&self) -> &V { + &self.entries[self.index()].value + } + + /// Gets a mutable reference to the entry's value in the map. + /// + /// If you need a reference which may outlive the destruction of the + /// [`RawEntryMut`] value, see [`into_mut`][Self::into_mut]. + pub fn get_mut(&mut self) -> &mut V { + let index = self.index(); + &mut self.entries[index].value + } + + /// Converts into a mutable reference to the entry's value in the map, + /// with a lifetime bound to the map itself. + pub fn into_mut(self) -> &'a mut V { + let index = self.index(); + &mut self.entries[index].value + } + + /// Gets a reference to the entry's key and value in the map. + pub fn get_key_value(&self) -> (&K, &V) { + self.entries[self.index()].refs() + } + + /// Gets a reference to the entry's key and value in the map. + pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) { + let index = self.index(); + self.entries[index].muts() + } + + /// Converts into a mutable reference to the entry's key and value in the map, + /// with a lifetime bound to the map itself. + pub fn into_key_value_mut(self) -> (&'a mut K, &'a mut V) { + let index = self.index(); + self.entries[index].muts() + } + + /// Sets the value of the entry, and returns the entry's old value. + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + /// Sets the key of the entry, and returns the entry's old key. + pub fn insert_key(&mut self, key: K) -> K { + mem::replace(self.key_mut(), key) + } + + /// Remove the key, value pair stored in the map for this entry, and return the value. + /// + /// **NOTE:** This is equivalent to [`.swap_remove()`][Self::swap_remove], replacing this + /// entry's position with the last element, and it is deprecated in favor of calling that + /// explicitly. If you need to preserve the relative order of the keys in the map, use + /// [`.shift_remove()`][Self::shift_remove] instead. + #[deprecated(note = "`remove` disrupts the map order -- \ + use `swap_remove` or `shift_remove` for explicit behavior.")] + pub fn remove(self) -> V { + self.swap_remove() + } + + /// Remove the key, value pair stored in the map for this entry, and return the value. 
+ /// + /// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with + /// the last element of the map and popping it off. + /// **This perturbs the position of what used to be the last element!** + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove(self) -> V { + self.swap_remove_entry().1 + } + + /// Remove the key, value pair stored in the map for this entry, and return the value. + /// + /// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove(self) -> V { + self.shift_remove_entry().1 + } + + /// Remove and return the key, value pair stored in the map for this entry + /// + /// **NOTE:** This is equivalent to [`.swap_remove_entry()`][Self::swap_remove_entry], + /// replacing this entry's position with the last element, and it is deprecated in favor of + /// calling that explicitly. If you need to preserve the relative order of the keys in the map, + /// use [`.shift_remove_entry()`][Self::shift_remove_entry] instead. + #[deprecated(note = "`remove_entry` disrupts the map order -- \ + use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")] + pub fn remove_entry(self) -> (K, V) { + self.swap_remove_entry() + } + + /// Remove and return the key, value pair stored in the map for this entry + /// + /// Like [`Vec::swap_remove`][crate::Vec::swap_remove], the pair is removed by swapping it with + /// the last element of the map and popping it off. + /// **This perturbs the position of what used to be the last element!** + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove_entry(self) -> (K, V) { + let (index, entry) = self.index.remove(); + RefMut::new(entry.into_table(), self.entries).swap_remove_finish(index) + } + + /// Remove and return the key, value pair stored in the map for this entry + /// + /// Like [`Vec::remove`][crate::Vec::remove], the pair is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_entry(self) -> (K, V) { + let (index, entry) = self.index.remove(); + RefMut::new(entry.into_table(), self.entries).shift_remove_finish(index) + } + + /// Moves the position of the entry to a new index + /// by shifting all other entries in-between. + /// + /// This is equivalent to [`IndexMap::move_index`] + /// coming `from` the current [`.index()`][Self::index]. + /// + /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up. + /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down. + /// + /// ***Panics*** if `to` is out of bounds. + /// + /// Computes in **O(n)** time (average). + pub fn move_index(self, to: usize) { + let index = self.index(); + self.into_ref_mut().move_index(index, to); + } + + /// Swaps the position of entry with another. + /// + /// This is equivalent to [`IndexMap::swap_indices`] + /// with the current [`.index()`][Self::index] as one of the two being swapped. + /// + /// ***Panics*** if the `other` index is out of bounds. + /// + /// Computes in **O(1)** time (average). 
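A rough sketch contrasting the two removal flavors documented above: `swap_remove` trades ordering for O(1) by moving the last entry into the freed slot, while `shift_remove` keeps the relative order at O(n) cost.

```rust
use indexmap::map::raw_entry_v1::RawEntryMut;
use indexmap::map::{IndexMap, RawEntryApiV1};

fn main() {
    let mut map = IndexMap::new();
    map.extend([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);

    // `swap_remove` is O(1) but moves the last entry ("d") into the removed slot...
    if let RawEntryMut::Occupied(entry) = map.raw_entry_mut_v1().from_key("b") {
        assert_eq!(entry.swap_remove(), 2);
    }
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "d", "c"]);

    // ...while `shift_remove` is O(n) but preserves the order of the remaining entries.
    if let RawEntryMut::Occupied(entry) = map.raw_entry_mut_v1().from_key("d") {
        assert_eq!(entry.shift_remove(), 4);
    }
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "c"]);
}
```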
+ pub fn swap_indices(self, other: usize) { + let index = self.index(); + self.into_ref_mut().swap_indices(index, other); + } +} + +/// A view into a vacant raw entry in an [`IndexMap`]. +/// It is part of the [`RawEntryMut`] enum. +pub struct RawVacantEntryMut<'a, K, V, S> { + map: RefMut<'a, K, V>, + hash_builder: &'a S, +} + +impl fmt::Debug for RawVacantEntryMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawVacantEntryMut").finish_non_exhaustive() + } +} + +impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> { + /// Return the index where a key-value pair may be inserted. + pub fn index(&self) -> usize { + self.map.indices.len() + } + + /// Inserts the given key and value into the map, + /// and returns mutable references to them. + pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + let mut h = self.hash_builder.build_hasher(); + key.hash(&mut h); + self.insert_hashed_nocheck(h.finish(), key, value) + } + + /// Inserts the given key and value into the map with the provided hash, + /// and returns mutable references to them. + pub fn insert_hashed_nocheck(mut self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) { + let hash = HashValue(hash as usize); + let i = self.map.insert_unique(hash, key, value); + self.map.entries[i].muts() + } + + /// Inserts the given key and value into the map at the given index, + /// shifting others to the right, and returns mutable references to them. + /// + /// ***Panics*** if `index` is out of bounds. + /// + /// Computes in **O(n)** time (average). + pub fn shift_insert(self, index: usize, key: K, value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + let mut h = self.hash_builder.build_hasher(); + key.hash(&mut h); + self.shift_insert_hashed_nocheck(index, h.finish(), key, value) + } + + /// Inserts the given key and value into the map with the provided hash + /// at the given index, and returns mutable references to them. + /// + /// ***Panics*** if `index` is out of bounds. + /// + /// Computes in **O(n)** time (average). 
+ pub fn shift_insert_hashed_nocheck( + mut self, + index: usize, + hash: u64, + key: K, + value: V, + ) -> (&'a mut K, &'a mut V) { + let hash = HashValue(hash as usize); + self.map.shift_insert_unique(index, hash, key, value); + self.map.entries[index].muts() + } +} + +mod private { + pub trait Sealed {} + + impl Sealed for super::IndexMap {} +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/iter.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/iter.rs new file mode 100644 index 000000000000..2943f18ab000 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/iter.rs @@ -0,0 +1,775 @@ +use super::core::IndexMapCore; +use super::{Bucket, Entries, IndexMap, Slice}; + +use alloc::vec::{self, Vec}; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::iter::FusedIterator; +use core::ops::{Index, RangeBounds}; +use core::slice; + +impl<'a, K, V, S> IntoIterator for &'a IndexMap { + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut IndexMap { + type Item = (&'a K, &'a mut V); + type IntoIter = IterMut<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl IntoIterator for IndexMap { + type Item = (K, V); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter::new(self.into_entries()) + } +} + +/// An iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::iter`] method. +/// See its documentation for more. +pub struct Iter<'a, K, V> { + iter: slice::Iter<'a, Bucket>, +} + +impl<'a, K, V> Iter<'a, K, V> { + pub(super) fn new(entries: &'a [Bucket]) -> Self { + Self { + iter: entries.iter(), + } + } + + /// Returns a slice of the remaining entries in the iterator. + pub fn as_slice(&self) -> &'a Slice { + Slice::from_slice(self.iter.as_slice()) + } +} + +impl<'a, K, V> Iterator for Iter<'a, K, V> { + type Item = (&'a K, &'a V); + + iterator_methods!(Bucket::refs); +} + +impl DoubleEndedIterator for Iter<'_, K, V> { + double_ended_iterator_methods!(Bucket::refs); +} + +impl ExactSizeIterator for Iter<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Iter<'_, K, V> {} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Iter<'_, K, V> { + fn clone(&self) -> Self { + Iter { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Iter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Default for Iter<'_, K, V> { + fn default() -> Self { + Self { iter: [].iter() } + } +} + +/// A mutable iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::iter_mut`] method. +/// See its documentation for more. +pub struct IterMut<'a, K, V> { + iter: slice::IterMut<'a, Bucket>, +} + +impl<'a, K, V> IterMut<'a, K, V> { + pub(super) fn new(entries: &'a mut [Bucket]) -> Self { + Self { + iter: entries.iter_mut(), + } + } + + /// Returns a slice of the remaining entries in the iterator. + pub fn as_slice(&self) -> &Slice { + Slice::from_slice(self.iter.as_slice()) + } + + /// Returns a mutable slice of the remaining entries in the iterator. + /// + /// To avoid creating `&mut` references that alias, this is forced to consume the iterator. 
+ pub fn into_slice(self) -> &'a mut Slice { + Slice::from_mut_slice(self.iter.into_slice()) + } +} + +impl<'a, K, V> Iterator for IterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + iterator_methods!(Bucket::ref_mut); +} + +impl DoubleEndedIterator for IterMut<'_, K, V> { + double_ended_iterator_methods!(Bucket::ref_mut); +} + +impl ExactSizeIterator for IterMut<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IterMut<'_, K, V> {} + +impl fmt::Debug for IterMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +impl Default for IterMut<'_, K, V> { + fn default() -> Self { + Self { + iter: [].iter_mut(), + } + } +} + +/// A mutable iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`MutableKeys::iter_mut2`][super::MutableKeys::iter_mut2] method. +/// See its documentation for more. +pub struct IterMut2<'a, K, V> { + iter: slice::IterMut<'a, Bucket>, +} + +impl<'a, K, V> IterMut2<'a, K, V> { + pub(super) fn new(entries: &'a mut [Bucket]) -> Self { + Self { + iter: entries.iter_mut(), + } + } + + /// Returns a slice of the remaining entries in the iterator. + pub fn as_slice(&self) -> &Slice { + Slice::from_slice(self.iter.as_slice()) + } + + /// Returns a mutable slice of the remaining entries in the iterator. + /// + /// To avoid creating `&mut` references that alias, this is forced to consume the iterator. + pub fn into_slice(self) -> &'a mut Slice { + Slice::from_mut_slice(self.iter.into_slice()) + } +} + +impl<'a, K, V> Iterator for IterMut2<'a, K, V> { + type Item = (&'a mut K, &'a mut V); + + iterator_methods!(Bucket::muts); +} + +impl DoubleEndedIterator for IterMut2<'_, K, V> { + double_ended_iterator_methods!(Bucket::muts); +} + +impl ExactSizeIterator for IterMut2<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IterMut2<'_, K, V> {} + +impl fmt::Debug for IterMut2<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +impl Default for IterMut2<'_, K, V> { + fn default() -> Self { + Self { + iter: [].iter_mut(), + } + } +} + +/// An owning iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::into_iter`] method +/// (provided by the [`IntoIterator`] trait). See its documentation for more. +#[derive(Clone)] +pub struct IntoIter { + iter: vec::IntoIter>, +} + +impl IntoIter { + pub(super) fn new(entries: Vec>) -> Self { + Self { + iter: entries.into_iter(), + } + } + + /// Returns a slice of the remaining entries in the iterator. + pub fn as_slice(&self) -> &Slice { + Slice::from_slice(self.iter.as_slice()) + } + + /// Returns a mutable slice of the remaining entries in the iterator. 
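A minimal sketch of the `into_slice` conversion described above: the mutable iterator is advanced partway, then the remainder is updated in place through the returned slice. It assumes an `IndexMap` with the default hasher.

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new();
    map.extend([("a", 1), ("b", 2), ("c", 3)]);

    let mut iter = map.iter_mut();
    // Consume the first entry through the iterator...
    let (first_key, first_val) = iter.next().unwrap();
    assert_eq!((*first_key, *first_val), ("a", 1));

    // ...then turn the remainder into a mutable `Slice` and update it in place.
    let rest = iter.into_slice();
    for (_key, value) in rest.iter_mut() {
        *value *= 10;
    }
    assert_eq!(map["b"], 20);
    assert_eq!(map["c"], 30);
}
```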
+ pub fn as_mut_slice(&mut self) -> &mut Slice { + Slice::from_mut_slice(self.iter.as_mut_slice()) + } +} + +impl Iterator for IntoIter { + type Item = (K, V); + + iterator_methods!(Bucket::key_value); +} + +impl DoubleEndedIterator for IntoIter { + double_ended_iterator_methods!(Bucket::key_value); +} + +impl ExactSizeIterator for IntoIter { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IntoIter {} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +impl Default for IntoIter { + fn default() -> Self { + Self { + iter: Vec::new().into_iter(), + } + } +} + +/// A draining iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::drain`] method. +/// See its documentation for more. +pub struct Drain<'a, K, V> { + iter: vec::Drain<'a, Bucket>, +} + +impl<'a, K, V> Drain<'a, K, V> { + pub(super) fn new(iter: vec::Drain<'a, Bucket>) -> Self { + Self { iter } + } + + /// Returns a slice of the remaining entries in the iterator. + pub fn as_slice(&self) -> &Slice { + Slice::from_slice(self.iter.as_slice()) + } +} + +impl Iterator for Drain<'_, K, V> { + type Item = (K, V); + + iterator_methods!(Bucket::key_value); +} + +impl DoubleEndedIterator for Drain<'_, K, V> { + double_ended_iterator_methods!(Bucket::key_value); +} + +impl ExactSizeIterator for Drain<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Drain<'_, K, V> {} + +impl fmt::Debug for Drain<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +/// An iterator over the keys of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::keys`] method. +/// See its documentation for more. +pub struct Keys<'a, K, V> { + iter: slice::Iter<'a, Bucket>, +} + +impl<'a, K, V> Keys<'a, K, V> { + pub(super) fn new(entries: &'a [Bucket]) -> Self { + Self { + iter: entries.iter(), + } + } +} + +impl<'a, K, V> Iterator for Keys<'a, K, V> { + type Item = &'a K; + + iterator_methods!(Bucket::key_ref); +} + +impl DoubleEndedIterator for Keys<'_, K, V> { + double_ended_iterator_methods!(Bucket::key_ref); +} + +impl ExactSizeIterator for Keys<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Keys<'_, K, V> {} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Keys<'_, K, V> { + fn clone(&self) -> Self { + Keys { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Keys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Default for Keys<'_, K, V> { + fn default() -> Self { + Self { iter: [].iter() } + } +} + +/// Access [`IndexMap`] keys at indexed positions. +/// +/// While [`Index for IndexMap`][values] accesses a map's values, +/// indexing through [`IndexMap::keys`] offers an alternative to access a map's +/// keys instead. +/// +/// [values]: IndexMap#impl-Index-for-IndexMap +/// +/// Since `Keys` is also an iterator, consuming items from the iterator will +/// offset the effective indexes. Similarly, if `Keys` is obtained from +/// [`Slice::keys`], indexes will be interpreted relative to the position of +/// that slice. 
+/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { +/// map.insert(word.to_lowercase(), word.to_uppercase()); +/// } +/// +/// assert_eq!(map[0], "LOREM"); +/// assert_eq!(map.keys()[0], "lorem"); +/// assert_eq!(map[1], "IPSUM"); +/// assert_eq!(map.keys()[1], "ipsum"); +/// +/// map.reverse(); +/// assert_eq!(map.keys()[0], "amet"); +/// assert_eq!(map.keys()[1], "sit"); +/// +/// map.sort_keys(); +/// assert_eq!(map.keys()[0], "amet"); +/// assert_eq!(map.keys()[1], "dolor"); +/// +/// // Advancing the iterator will offset the indexing +/// let mut keys = map.keys(); +/// assert_eq!(keys[0], "amet"); +/// assert_eq!(keys.next().map(|s| &**s), Some("amet")); +/// assert_eq!(keys[0], "dolor"); +/// assert_eq!(keys[1], "ipsum"); +/// +/// // Slices may have an offset as well +/// let slice = &map[2..]; +/// assert_eq!(slice[0], "IPSUM"); +/// assert_eq!(slice.keys()[0], "ipsum"); +/// ``` +/// +/// ```should_panic +/// use indexmap::IndexMap; +/// +/// let mut map = IndexMap::new(); +/// map.insert("foo", 1); +/// println!("{:?}", map.keys()[10]); // panics! +/// ``` +impl<'a, K, V> Index for Keys<'a, K, V> { + type Output = K; + + /// Returns a reference to the key at the supplied `index`. + /// + /// ***Panics*** if `index` is out of bounds. + fn index(&self, index: usize) -> &K { + &self.iter.as_slice()[index].key + } +} + +/// An owning iterator over the keys of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::into_keys`] method. +/// See its documentation for more. +pub struct IntoKeys { + iter: vec::IntoIter>, +} + +impl IntoKeys { + pub(super) fn new(entries: Vec>) -> Self { + Self { + iter: entries.into_iter(), + } + } +} + +impl Iterator for IntoKeys { + type Item = K; + + iterator_methods!(Bucket::key); +} + +impl DoubleEndedIterator for IntoKeys { + double_ended_iterator_methods!(Bucket::key); +} + +impl ExactSizeIterator for IntoKeys { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IntoKeys {} + +impl fmt::Debug for IntoKeys { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +impl Default for IntoKeys { + fn default() -> Self { + Self { + iter: Vec::new().into_iter(), + } + } +} + +/// An iterator over the values of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::values`] method. +/// See its documentation for more. 
+pub struct Values<'a, K, V> { + iter: slice::Iter<'a, Bucket>, +} + +impl<'a, K, V> Values<'a, K, V> { + pub(super) fn new(entries: &'a [Bucket]) -> Self { + Self { + iter: entries.iter(), + } + } +} + +impl<'a, K, V> Iterator for Values<'a, K, V> { + type Item = &'a V; + + iterator_methods!(Bucket::value_ref); +} + +impl DoubleEndedIterator for Values<'_, K, V> { + double_ended_iterator_methods!(Bucket::value_ref); +} + +impl ExactSizeIterator for Values<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Values<'_, K, V> {} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Values<'_, K, V> { + fn clone(&self) -> Self { + Values { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Values<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Default for Values<'_, K, V> { + fn default() -> Self { + Self { iter: [].iter() } + } +} + +/// A mutable iterator over the values of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::values_mut`] method. +/// See its documentation for more. +pub struct ValuesMut<'a, K, V> { + iter: slice::IterMut<'a, Bucket>, +} + +impl<'a, K, V> ValuesMut<'a, K, V> { + pub(super) fn new(entries: &'a mut [Bucket]) -> Self { + Self { + iter: entries.iter_mut(), + } + } +} + +impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { + type Item = &'a mut V; + + iterator_methods!(Bucket::value_mut); +} + +impl DoubleEndedIterator for ValuesMut<'_, K, V> { + double_ended_iterator_methods!(Bucket::value_mut); +} + +impl ExactSizeIterator for ValuesMut<'_, K, V> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for ValuesMut<'_, K, V> {} + +impl fmt::Debug for ValuesMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::value_ref); + f.debug_list().entries(iter).finish() + } +} + +impl Default for ValuesMut<'_, K, V> { + fn default() -> Self { + Self { + iter: [].iter_mut(), + } + } +} + +/// An owning iterator over the values of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::into_values`] method. +/// See its documentation for more. +pub struct IntoValues { + iter: vec::IntoIter>, +} + +impl IntoValues { + pub(super) fn new(entries: Vec>) -> Self { + Self { + iter: entries.into_iter(), + } + } +} + +impl Iterator for IntoValues { + type Item = V; + + iterator_methods!(Bucket::value); +} + +impl DoubleEndedIterator for IntoValues { + double_ended_iterator_methods!(Bucket::value); +} + +impl ExactSizeIterator for IntoValues { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IntoValues {} + +impl fmt::Debug for IntoValues { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::value_ref); + f.debug_list().entries(iter).finish() + } +} + +impl Default for IntoValues { + fn default() -> Self { + Self { + iter: Vec::new().into_iter(), + } + } +} + +/// A splicing iterator for `IndexMap`. +/// +/// This `struct` is created by [`IndexMap::splice()`]. +/// See its documentation for more. 
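A short usage sketch of the value iterators defined here (`values`, `values_mut`, `into_values`), showing that they follow insertion order:

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new();
    map.extend([("one", 1), ("two", 2), ("three", 3)]);

    // `values` yields references in insertion order.
    let snapshot: Vec<i32> = map.values().copied().collect();
    assert_eq!(snapshot, [1, 2, 3]);

    // `values_mut` allows updating every value in place, still in order.
    for value in map.values_mut() {
        *value += 100;
    }
    assert_eq!(map.values().copied().collect::<Vec<_>>(), [101, 102, 103]);

    // `into_values` consumes the map and hands the values over by value.
    let owned: Vec<i32> = map.into_values().collect();
    assert_eq!(owned, [101, 102, 103]);
}
```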
+pub struct Splice<'a, I, K, V, S> +where + I: Iterator, + K: Hash + Eq, + S: BuildHasher, +{ + map: &'a mut IndexMap, + tail: IndexMapCore, + drain: vec::IntoIter>, + replace_with: I, +} + +impl<'a, I, K, V, S> Splice<'a, I, K, V, S> +where + I: Iterator, + K: Hash + Eq, + S: BuildHasher, +{ + pub(super) fn new(map: &'a mut IndexMap, range: R, replace_with: I) -> Self + where + R: RangeBounds, + { + let (tail, drain) = map.core.split_splice(range); + Self { + map, + tail, + drain, + replace_with, + } + } +} + +impl Drop for Splice<'_, I, K, V, S> +where + I: Iterator, + K: Hash + Eq, + S: BuildHasher, +{ + fn drop(&mut self) { + // Finish draining unconsumed items. We don't strictly *have* to do this + // manually, since we already split it into separate memory, but it will + // match the drop order of `vec::Splice` items this way. + let _ = self.drain.nth(usize::MAX); + + // Now insert all the new items. If a key matches an existing entry, it + // keeps the original position and only replaces the value, like `insert`. + while let Some((key, value)) = self.replace_with.next() { + // Since the tail is disjoint, we can try to update it first, + // or else insert (update or append) the primary map. + let hash = self.map.hash(&key); + if let Some(i) = self.tail.get_index_of(hash, &key) { + self.tail.as_entries_mut()[i].value = value; + } else { + self.map.core.insert_full(hash, key, value); + } + } + + // Finally, re-append the tail + self.map.core.append_unchecked(&mut self.tail); + } +} + +impl Iterator for Splice<'_, I, K, V, S> +where + I: Iterator, + K: Hash + Eq, + S: BuildHasher, +{ + type Item = (K, V); + + fn next(&mut self) -> Option { + self.drain.next().map(Bucket::key_value) + } + + fn size_hint(&self) -> (usize, Option) { + self.drain.size_hint() + } +} + +impl DoubleEndedIterator for Splice<'_, I, K, V, S> +where + I: Iterator, + K: Hash + Eq, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + self.drain.next_back().map(Bucket::key_value) + } +} + +impl ExactSizeIterator for Splice<'_, I, K, V, S> +where + I: Iterator, + K: Hash + Eq, + S: BuildHasher, +{ + fn len(&self) -> usize { + self.drain.len() + } +} + +impl FusedIterator for Splice<'_, I, K, V, S> +where + I: Iterator, + K: Hash + Eq, + S: BuildHasher, +{ +} + +impl<'a, I, K, V, S> fmt::Debug for Splice<'a, I, K, V, S> +where + I: fmt::Debug + Iterator, + K: fmt::Debug + Hash + Eq, + V: fmt::Debug, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Follow `vec::Splice` in only printing the drain and replacement + f.debug_struct("Splice") + .field("drain", &self.drain) + .field("replace_with", &self.replace_with) + .finish() + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/mutable.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/mutable.rs new file mode 100644 index 000000000000..e429c8beda67 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/mutable.rs @@ -0,0 +1,166 @@ +use core::hash::{BuildHasher, Hash}; + +use super::{ + Bucket, Entries, Entry, Equivalent, IndexMap, IndexedEntry, IterMut2, OccupiedEntry, + VacantEntry, +}; + +/// Opt-in mutable access to [`IndexMap`] keys. +/// +/// These methods expose `&mut K`, mutable references to the key as it is stored +/// in the map. +/// You are allowed to modify the keys in the map **if the modification +/// does not change the key’s hash and equality**. +/// +/// If keys are modified erroneously, you can no longer look them up. 
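A small sketch of the splice semantics implemented in the `Drop` above: keys from `replace_with` that already exist outside the removed range keep their position and only update their value, while genuinely new keys land at the splice point. It assumes the crate's default `std` feature for `IndexMap::from`.

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::from([("a", 0), ("b", 1), ("c", 2), ("d", 3)]);

    // Replace indices 1..3 ("b" and "c") with two new pairs. "a" already exists
    // outside the removed range, so it keeps its position and only its value changes.
    let removed: Vec<_> = map.splice(1..3, [("x", 10), ("a", 100)]).collect();

    assert_eq!(removed, [("b", 1), ("c", 2)]);
    assert_eq!(
        map.into_iter().collect::<Vec<_>>(),
        [("a", 100), ("x", 10), ("d", 3)]
    );
}
```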
+/// This is sound (memory safe) but a logical error hazard (just like +/// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). +/// +/// `use` this trait to enable its methods for `IndexMap`. +/// +/// This trait is sealed and cannot be implemented for types outside this crate. +pub trait MutableKeys: private::Sealed { + type Key; + type Value; + + /// Return item index, mutable reference to key and value + /// + /// Computes in **O(1)** time (average). + fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut Self::Key, &mut Self::Value)> + where + Q: ?Sized + Hash + Equivalent; + + /// Return mutable reference to key and value at an index. + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + fn get_index_mut2(&mut self, index: usize) -> Option<(&mut Self::Key, &mut Self::Value)>; + + /// Return an iterator over the key-value pairs of the map, in their order + fn iter_mut2(&mut self) -> IterMut2<'_, Self::Key, Self::Value>; + + /// Scan through each key-value pair in the map and keep those where the + /// closure `keep` returns `true`. + /// + /// The elements are visited in order, and remaining elements keep their + /// order. + /// + /// Computes in **O(n)** time (average). + fn retain2(&mut self, keep: F) + where + F: FnMut(&mut Self::Key, &mut Self::Value) -> bool; +} + +/// Opt-in mutable access to [`IndexMap`] keys. +/// +/// See [`MutableKeys`] for more information. +impl MutableKeys for IndexMap +where + S: BuildHasher, +{ + type Key = K; + type Value = V; + + fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)> + where + Q: ?Sized + Hash + Equivalent, + { + if let Some(i) = self.get_index_of(key) { + let entry = &mut self.as_entries_mut()[i]; + Some((i, &mut entry.key, &mut entry.value)) + } else { + None + } + } + + fn get_index_mut2(&mut self, index: usize) -> Option<(&mut K, &mut V)> { + self.as_entries_mut().get_mut(index).map(Bucket::muts) + } + + fn iter_mut2(&mut self) -> IterMut2<'_, Self::Key, Self::Value> { + IterMut2::new(self.as_entries_mut()) + } + + fn retain2(&mut self, keep: F) + where + F: FnMut(&mut K, &mut V) -> bool, + { + self.core.retain_in_order(keep); + } +} + +/// Opt-in mutable access to [`Entry`] keys. +/// +/// These methods expose `&mut K`, mutable references to the key as it is stored +/// in the map. +/// You are allowed to modify the keys in the map **if the modification +/// does not change the key’s hash and equality**. +/// +/// If keys are modified erroneously, you can no longer look them up. +/// This is sound (memory safe) but a logical error hazard (just like +/// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). +/// +/// `use` this trait to enable its methods for `Entry`. +/// +/// This trait is sealed and cannot be implemented for types outside this crate. +pub trait MutableEntryKey: private::Sealed { + type Key; + + /// Gets a mutable reference to the entry's key, either within the map if occupied, + /// or else the new key that was used to find the entry. + fn key_mut(&mut self) -> &mut Self::Key; +} + +/// Opt-in mutable access to [`Entry`] keys. +/// +/// See [`MutableEntryKey`] for more information. +impl MutableEntryKey for Entry<'_, K, V> { + type Key = K; + fn key_mut(&mut self) -> &mut Self::Key { + match self { + Entry::Occupied(e) => e.key_mut(), + Entry::Vacant(e) => e.key_mut(), + } + } +} + +/// Opt-in mutable access to [`OccupiedEntry`] keys. +/// +/// See [`MutableEntryKey`] for more information. 
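A rough sketch of the key-mutation contract described above, using a hypothetical `Key` type whose `Hash`/`Eq` only cover an `id` field, so editing the other field through `retain2` cannot break later lookups:

```rust
use core::hash::{Hash, Hasher};
use indexmap::map::MutableKeys;
use indexmap::IndexMap;

// A key whose `Hash`/`Eq` only look at `id`; `label` is free to change.
struct Key {
    id: u32,
    label: String,
}

impl PartialEq for Key {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}
impl Eq for Key {}
impl Hash for Key {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
    }
}

fn main() {
    let mut map = IndexMap::new();
    map.insert(Key { id: 1, label: "one".into() }, 10);
    map.insert(Key { id: 2, label: "two".into() }, 20);

    // `retain2` exposes `&mut K`; mutating `label` is safe here because it does
    // not participate in `Hash`/`Eq`, so lookups keep working afterwards.
    map.retain2(|key, value| {
        key.label.make_ascii_uppercase();
        *value >= 20
    });

    assert_eq!(map.len(), 1);
    let (key, value) = map.get_index(0).unwrap();
    assert_eq!((key.id, key.label.as_str(), *value), (2, "TWO", 20));
}
```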
+impl MutableEntryKey for OccupiedEntry<'_, K, V> { + type Key = K; + fn key_mut(&mut self) -> &mut Self::Key { + self.key_mut() + } +} + +/// Opt-in mutable access to [`VacantEntry`] keys. +/// +/// See [`MutableEntryKey`] for more information. +impl MutableEntryKey for VacantEntry<'_, K, V> { + type Key = K; + fn key_mut(&mut self) -> &mut Self::Key { + self.key_mut() + } +} + +/// Opt-in mutable access to [`IndexedEntry`] keys. +/// +/// See [`MutableEntryKey`] for more information. +impl MutableEntryKey for IndexedEntry<'_, K, V> { + type Key = K; + fn key_mut(&mut self) -> &mut Self::Key { + self.key_mut() + } +} + +mod private { + pub trait Sealed {} + + impl Sealed for super::IndexMap {} + impl Sealed for super::Entry<'_, K, V> {} + impl Sealed for super::OccupiedEntry<'_, K, V> {} + impl Sealed for super::VacantEntry<'_, K, V> {} + impl Sealed for super::IndexedEntry<'_, K, V> {} +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/serde_seq.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/serde_seq.rs new file mode 100644 index 000000000000..602ae7dc74b6 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/serde_seq.rs @@ -0,0 +1,138 @@ +//! Functions to serialize and deserialize an [`IndexMap`] as an ordered sequence. +//! +//! The default `serde` implementation serializes `IndexMap` as a normal map, +//! but there is no guarantee that serialization formats will preserve the order +//! of the key-value pairs. This module serializes `IndexMap` as a sequence of +//! `(key, value)` elements instead, in order. +//! +//! This module may be used in a field attribute for derived implementations: +//! +//! ``` +//! # use indexmap::IndexMap; +//! # use serde_derive::{Deserialize, Serialize}; +//! #[derive(Deserialize, Serialize)] +//! struct Data { +//! #[serde(with = "indexmap::map::serde_seq")] +//! map: IndexMap, +//! // ... +//! } +//! ``` + +use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; +use serde::ser::{Serialize, Serializer}; + +use core::fmt::{self, Formatter}; +use core::hash::{BuildHasher, Hash}; +use core::marker::PhantomData; + +use crate::map::Slice as MapSlice; +use crate::serde::cautious_capacity; +use crate::set::Slice as SetSlice; +use crate::IndexMap; + +/// Serializes a [`map::Slice`][MapSlice] as an ordered sequence. +/// +/// This behaves like [`crate::map::serde_seq`] for `IndexMap`, serializing a sequence +/// of `(key, value)` pairs, rather than as a map that might not preserve order. +impl Serialize for MapSlice +where + K: Serialize, + V: Serialize, +{ + fn serialize(&self, serializer: T) -> Result + where + T: Serializer, + { + serializer.collect_seq(self) + } +} + +/// Serializes a [`set::Slice`][SetSlice] as an ordered sequence. +impl Serialize for SetSlice +where + T: Serialize, +{ + fn serialize(&self, serializer: Se) -> Result + where + Se: Serializer, + { + serializer.collect_seq(self) + } +} + +/// Serializes an [`IndexMap`] as an ordered sequence. +/// +/// This function may be used in a field attribute for deriving [`Serialize`]: +/// +/// ``` +/// # use indexmap::IndexMap; +/// # use serde_derive::Serialize; +/// #[derive(Serialize)] +/// struct Data { +/// #[serde(serialize_with = "indexmap::map::serde_seq::serialize")] +/// map: IndexMap, +/// // ... 
+/// } +/// ``` +pub fn serialize(map: &IndexMap, serializer: T) -> Result +where + K: Serialize, + V: Serialize, + T: Serializer, +{ + serializer.collect_seq(map) +} + +/// Visitor to deserialize a *sequenced* `IndexMap` +struct SeqVisitor(PhantomData<(K, V, S)>); + +impl<'de, K, V, S> Visitor<'de> for SeqVisitor +where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: Default + BuildHasher, +{ + type Value = IndexMap; + + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "a sequenced map") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let capacity = cautious_capacity::(seq.size_hint()); + let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default()); + + while let Some((key, value)) = seq.next_element()? { + map.insert(key, value); + } + + Ok(map) + } +} + +/// Deserializes an [`IndexMap`] from an ordered sequence. +/// +/// This function may be used in a field attribute for deriving [`Deserialize`]: +/// +/// ``` +/// # use indexmap::IndexMap; +/// # use serde_derive::Deserialize; +/// #[derive(Deserialize)] +/// struct Data { +/// #[serde(deserialize_with = "indexmap::map::serde_seq::deserialize")] +/// map: IndexMap, +/// // ... +/// } +/// ``` +pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: Default + BuildHasher, +{ + deserializer.deserialize_seq(SeqVisitor(PhantomData)) +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/slice.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/slice.rs new file mode 100644 index 000000000000..94795b709e0f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/slice.rs @@ -0,0 +1,539 @@ +use super::{ + Bucket, Entries, IndexMap, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values, + ValuesMut, +}; +use crate::util::try_simplify_range; + +use alloc::boxed::Box; +use alloc::vec::Vec; +use core::cmp::Ordering; +use core::fmt; +use core::hash::{Hash, Hasher}; +use core::ops::{self, Bound, Index, IndexMut, RangeBounds}; + +/// A dynamically-sized slice of key-value pairs in an [`IndexMap`]. +/// +/// This supports indexed operations much like a `[(K, V)]` slice, +/// but not any hashed operations on the map keys. +/// +/// Unlike `IndexMap`, `Slice` does consider the order for [`PartialEq`] +/// and [`Eq`], and it also implements [`PartialOrd`], [`Ord`], and [`Hash`]. +#[repr(transparent)] +pub struct Slice { + pub(crate) entries: [Bucket], +} + +// SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, +// and reference lifetimes are bound together in function signatures. +#[allow(unsafe_code)] +impl Slice { + pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { + unsafe { &*(entries as *const [Bucket] as *const Self) } + } + + pub(super) fn from_mut_slice(entries: &mut [Bucket]) -> &mut Self { + unsafe { &mut *(entries as *mut [Bucket] as *mut Self) } + } + + pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { + unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } + } + + fn into_boxed(self: Box) -> Box<[Bucket]> { + unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } + } +} + +impl Slice { + pub(crate) fn into_entries(self: Box) -> Vec> { + self.into_boxed().into_vec() + } + + /// Returns an empty slice. + pub const fn new<'a>() -> &'a Self { + Self::from_slice(&[]) + } + + /// Returns an empty mutable slice. 
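A small round-trip sketch of the `serde_seq` adapter above, assuming `serde_derive` and `serde_json` are available and the crate's `serde` feature is enabled; an `IndexMap` annotated this way serializes as an ordered sequence of `[key, value]` pairs rather than a JSON object:

```rust
use indexmap::IndexMap;
use serde_derive::{Deserialize, Serialize};

#[derive(Deserialize, Serialize)]
struct Data {
    #[serde(with = "indexmap::map::serde_seq")]
    map: IndexMap<String, u32>,
}

fn main() {
    let mut map = IndexMap::new();
    map.insert("zebra".to_string(), 1);
    map.insert("apple".to_string(), 2);

    let data = Data { map };
    // Serialized as an array of [key, value] pairs, preserving insertion order...
    let json = serde_json::to_string(&data).unwrap();
    assert_eq!(json, r#"{"map":[["zebra",1],["apple",2]]}"#);

    // ...and deserialized back in the same order.
    let back: Data = serde_json::from_str(&json).unwrap();
    assert_eq!(back.map.get_index(0), Some((&"zebra".to_string(), &1)));
}
```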
+ pub fn new_mut<'a>() -> &'a mut Self { + Self::from_mut_slice(&mut []) + } + + /// Return the number of key-value pairs in the map slice. + #[inline] + pub const fn len(&self) -> usize { + self.entries.len() + } + + /// Returns true if the map slice contains no elements. + #[inline] + pub const fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + /// Get a key-value pair by index. + /// + /// Valid indices are `0 <= index < self.len()`. + pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { + self.entries.get(index).map(Bucket::refs) + } + + /// Get a key-value pair by index, with mutable access to the value. + /// + /// Valid indices are `0 <= index < self.len()`. + pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> { + self.entries.get_mut(index).map(Bucket::ref_mut) + } + + /// Returns a slice of key-value pairs in the given range of indices. + /// + /// Valid indices are `0 <= index < self.len()`. + pub fn get_range>(&self, range: R) -> Option<&Self> { + let range = try_simplify_range(range, self.entries.len())?; + self.entries.get(range).map(Slice::from_slice) + } + + /// Returns a mutable slice of key-value pairs in the given range of indices. + /// + /// Valid indices are `0 <= index < self.len()`. + pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Self> { + let range = try_simplify_range(range, self.entries.len())?; + self.entries.get_mut(range).map(Slice::from_mut_slice) + } + + /// Get the first key-value pair. + pub fn first(&self) -> Option<(&K, &V)> { + self.entries.first().map(Bucket::refs) + } + + /// Get the first key-value pair, with mutable access to the value. + pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { + self.entries.first_mut().map(Bucket::ref_mut) + } + + /// Get the last key-value pair. + pub fn last(&self) -> Option<(&K, &V)> { + self.entries.last().map(Bucket::refs) + } + + /// Get the last key-value pair, with mutable access to the value. + pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { + self.entries.last_mut().map(Bucket::ref_mut) + } + + /// Divides one slice into two at an index. + /// + /// ***Panics*** if `index > len`. + pub fn split_at(&self, index: usize) -> (&Self, &Self) { + let (first, second) = self.entries.split_at(index); + (Self::from_slice(first), Self::from_slice(second)) + } + + /// Divides one mutable slice into two at an index. + /// + /// ***Panics*** if `index > len`. + pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) { + let (first, second) = self.entries.split_at_mut(index); + (Self::from_mut_slice(first), Self::from_mut_slice(second)) + } + + /// Returns the first key-value pair and the rest of the slice, + /// or `None` if it is empty. + pub fn split_first(&self) -> Option<((&K, &V), &Self)> { + if let [first, rest @ ..] = &self.entries { + Some((first.refs(), Self::from_slice(rest))) + } else { + None + } + } + + /// Returns the first key-value pair and the rest of the slice, + /// with mutable access to the value, or `None` if it is empty. + pub fn split_first_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> { + if let [first, rest @ ..] = &mut self.entries { + Some((first.ref_mut(), Self::from_mut_slice(rest))) + } else { + None + } + } + + /// Returns the last key-value pair and the rest of the slice, + /// or `None` if it is empty. 
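A brief usage sketch of the slice accessors defined here, assuming `IndexMap::as_slice` from the same crate to borrow the map as a `Slice`:

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new();
    map.extend([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);

    // Borrow the whole map as a `Slice` and work with index positions.
    let slice = map.as_slice();
    assert_eq!(slice.len(), 4);
    assert_eq!(slice.first(), Some((&"a", &1)));
    assert_eq!(slice.last(), Some((&"d", &4)));

    // `get_range` returns a sub-slice; `split_at` divides the slice in two.
    let middle = slice.get_range(1..3).unwrap();
    assert_eq!(middle.get_index(0), Some((&"b", &2)));
    let (head, tail) = slice.split_at(2);
    assert_eq!(head.len(), 2);
    assert_eq!(tail.first(), Some((&"c", &3)));
}
```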
+ pub fn split_last(&self) -> Option<((&K, &V), &Self)> { + if let [rest @ .., last] = &self.entries { + Some((last.refs(), Self::from_slice(rest))) + } else { + None + } + } + + /// Returns the last key-value pair and the rest of the slice, + /// with mutable access to the value, or `None` if it is empty. + pub fn split_last_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> { + if let [rest @ .., last] = &mut self.entries { + Some((last.ref_mut(), Self::from_mut_slice(rest))) + } else { + None + } + } + + /// Return an iterator over the key-value pairs of the map slice. + pub fn iter(&self) -> Iter<'_, K, V> { + Iter::new(&self.entries) + } + + /// Return an iterator over the key-value pairs of the map slice. + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + IterMut::new(&mut self.entries) + } + + /// Return an iterator over the keys of the map slice. + pub fn keys(&self) -> Keys<'_, K, V> { + Keys::new(&self.entries) + } + + /// Return an owning iterator over the keys of the map slice. + pub fn into_keys(self: Box) -> IntoKeys { + IntoKeys::new(self.into_entries()) + } + + /// Return an iterator over the values of the map slice. + pub fn values(&self) -> Values<'_, K, V> { + Values::new(&self.entries) + } + + /// Return an iterator over mutable references to the the values of the map slice. + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { + ValuesMut::new(&mut self.entries) + } + + /// Return an owning iterator over the values of the map slice. + pub fn into_values(self: Box) -> IntoValues { + IntoValues::new(self.into_entries()) + } + + /// Search over a sorted map for a key. + /// + /// Returns the position where that key is present, or the position where it can be inserted to + /// maintain the sort. See [`slice::binary_search`] for more details. + /// + /// Computes in **O(log(n))** time, which is notably less scalable than looking the key up in + /// the map this is a slice from using [`IndexMap::get_index_of`], but this can also position + /// missing keys. + pub fn binary_search_keys(&self, x: &K) -> Result + where + K: Ord, + { + self.binary_search_by(|p, _| p.cmp(x)) + } + + /// Search over a sorted map with a comparator function. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search_by`] for more details. + /// + /// Computes in **O(log(n))** time. + #[inline] + pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result + where + F: FnMut(&'a K, &'a V) -> Ordering, + { + self.entries.binary_search_by(move |a| f(&a.key, &a.value)) + } + + /// Search over a sorted map with an extraction function. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. + /// + /// Computes in **O(log(n))** time. + #[inline] + pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result + where + F: FnMut(&'a K, &'a V) -> B, + B: Ord, + { + self.binary_search_by(|k, v| f(k, v).cmp(b)) + } + + /// Returns the index of the partition point of a sorted map according to the given predicate + /// (the index of the first element of the second partition). + /// + /// See [`slice::partition_point`] for more details. + /// + /// Computes in **O(log(n))** time. + #[must_use] + pub fn partition_point
<P>
(&self, mut pred: P) -> usize + where + P: FnMut(&K, &V) -> bool, + { + self.entries + .partition_point(move |a| pred(&a.key, &a.value)) + } +} + +impl<'a, K, V> IntoIterator for &'a Slice { + type IntoIter = Iter<'a, K, V>; + type Item = (&'a K, &'a V); + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K, V> IntoIterator for &'a mut Slice { + type IntoIter = IterMut<'a, K, V>; + type Item = (&'a K, &'a mut V); + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl IntoIterator for Box> { + type IntoIter = IntoIter; + type Item = (K, V); + + fn into_iter(self) -> Self::IntoIter { + IntoIter::new(self.into_entries()) + } +} + +impl Default for &'_ Slice { + fn default() -> Self { + Slice::from_slice(&[]) + } +} + +impl Default for &'_ mut Slice { + fn default() -> Self { + Slice::from_mut_slice(&mut []) + } +} + +impl Default for Box> { + fn default() -> Self { + Slice::from_boxed(Box::default()) + } +} + +impl Clone for Box> { + fn clone(&self) -> Self { + Slice::from_boxed(self.entries.to_vec().into_boxed_slice()) + } +} + +impl From<&Slice> for Box> { + fn from(slice: &Slice) -> Self { + Slice::from_boxed(Box::from(&slice.entries)) + } +} + +impl fmt::Debug for Slice { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self).finish() + } +} + +impl PartialEq for Slice { + fn eq(&self, other: &Self) -> bool { + self.len() == other.len() && self.iter().eq(other) + } +} + +impl Eq for Slice {} + +impl PartialOrd for Slice { + fn partial_cmp(&self, other: &Self) -> Option { + self.iter().partial_cmp(other) + } +} + +impl Ord for Slice { + fn cmp(&self, other: &Self) -> Ordering { + self.iter().cmp(other) + } +} + +impl Hash for Slice { + fn hash(&self, state: &mut H) { + self.len().hash(state); + for (key, value) in self { + key.hash(state); + value.hash(state); + } + } +} + +impl Index for Slice { + type Output = V; + + fn index(&self, index: usize) -> &V { + &self.entries[index].value + } +} + +impl IndexMut for Slice { + fn index_mut(&mut self, index: usize) -> &mut V { + &mut self.entries[index].value + } +} + +// We can't have `impl> Index` because that conflicts +// both upstream with `Index` and downstream with `Index<&Q>`. +// Instead, we repeat the implementations for all the core range types. +macro_rules! 
impl_index { + ($($range:ty),*) => {$( + impl Index<$range> for IndexMap { + type Output = Slice; + + fn index(&self, range: $range) -> &Self::Output { + Slice::from_slice(&self.as_entries()[range]) + } + } + + impl IndexMut<$range> for IndexMap { + fn index_mut(&mut self, range: $range) -> &mut Self::Output { + Slice::from_mut_slice(&mut self.as_entries_mut()[range]) + } + } + + impl Index<$range> for Slice { + type Output = Slice; + + fn index(&self, range: $range) -> &Self { + Self::from_slice(&self.entries[range]) + } + } + + impl IndexMut<$range> for Slice { + fn index_mut(&mut self, range: $range) -> &mut Self { + Self::from_mut_slice(&mut self.entries[range]) + } + } + )*} +} +impl_index!( + ops::Range, + ops::RangeFrom, + ops::RangeFull, + ops::RangeInclusive, + ops::RangeTo, + ops::RangeToInclusive, + (Bound, Bound) +); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn slice_index() { + fn check( + vec_slice: &[(i32, i32)], + map_slice: &Slice, + sub_slice: &Slice, + ) { + assert_eq!(map_slice as *const _, sub_slice as *const _); + itertools::assert_equal( + vec_slice.iter().copied(), + map_slice.iter().map(|(&k, &v)| (k, v)), + ); + itertools::assert_equal(vec_slice.iter().map(|(k, _)| k), map_slice.keys()); + itertools::assert_equal(vec_slice.iter().map(|(_, v)| v), map_slice.values()); + } + + let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect(); + let map: IndexMap = vec.iter().cloned().collect(); + let slice = map.as_slice(); + + // RangeFull + check(&vec[..], &map[..], &slice[..]); + + for i in 0usize..10 { + // Index + assert_eq!(vec[i].1, map[i]); + assert_eq!(vec[i].1, slice[i]); + assert_eq!(map[&(i as i32)], map[i]); + assert_eq!(map[&(i as i32)], slice[i]); + + // RangeFrom + check(&vec[i..], &map[i..], &slice[i..]); + + // RangeTo + check(&vec[..i], &map[..i], &slice[..i]); + + // RangeToInclusive + check(&vec[..=i], &map[..=i], &slice[..=i]); + + // (Bound, Bound) + let bounds = (Bound::Excluded(i), Bound::Unbounded); + check(&vec[i + 1..], &map[bounds], &slice[bounds]); + + for j in i..=10 { + // Range + check(&vec[i..j], &map[i..j], &slice[i..j]); + } + + for j in i..10 { + // RangeInclusive + check(&vec[i..=j], &map[i..=j], &slice[i..=j]); + } + } + } + + #[test] + fn slice_index_mut() { + fn check_mut( + vec_slice: &[(i32, i32)], + map_slice: &mut Slice, + sub_slice: &mut Slice, + ) { + assert_eq!(map_slice, sub_slice); + itertools::assert_equal( + vec_slice.iter().copied(), + map_slice.iter_mut().map(|(&k, &mut v)| (k, v)), + ); + itertools::assert_equal( + vec_slice.iter().map(|&(_, v)| v), + map_slice.values_mut().map(|&mut v| v), + ); + } + + let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect(); + let mut map: IndexMap = vec.iter().cloned().collect(); + let mut map2 = map.clone(); + let slice = map2.as_mut_slice(); + + // RangeFull + check_mut(&vec[..], &mut map[..], &mut slice[..]); + + for i in 0usize..10 { + // IndexMut + assert_eq!(&mut map[i], &mut slice[i]); + + // RangeFrom + check_mut(&vec[i..], &mut map[i..], &mut slice[i..]); + + // RangeTo + check_mut(&vec[..i], &mut map[..i], &mut slice[..i]); + + // RangeToInclusive + check_mut(&vec[..=i], &mut map[..=i], &mut slice[..=i]); + + // (Bound, Bound) + let bounds = (Bound::Excluded(i), Bound::Unbounded); + check_mut(&vec[i + 1..], &mut map[bounds], &mut slice[bounds]); + + for j in i..=10 { + // Range + check_mut(&vec[i..j], &mut map[i..j], &mut slice[i..j]); + } + + for j in i..10 { + // RangeInclusive + check_mut(&vec[i..=j], &mut map[i..=j], &mut slice[i..=j]); + 
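As a quick illustration of the `Slice` helpers introduced above (range indexing, `binary_search_keys`, `partition_point`), here is a minimal sketch assuming the indexmap 2.x public API; the function name and sample data are ours, not part of the vendored sources.

    // Illustrative sketch only; not taken from the vendored diff.
    fn slice_lookup_sketch() {
        use indexmap::IndexMap;

        let map: IndexMap<i32, &str> =
            [(1, "a"), (3, "b"), (5, "c"), (7, "d")].into_iter().collect();

        // Range indexing yields a `map::Slice` view over contiguous entries.
        let slice = &map[1..3];
        assert!(slice.keys().copied().eq([3, 5]));

        // The keys happen to be inserted in sorted order, so binary search applies.
        assert_eq!(map.binary_search_keys(&5), Ok(2));
        assert_eq!(map.binary_search_keys(&4), Err(2)); // insertion point
        assert_eq!(map.partition_point(|&k, _| k < 6), 3);
    }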
} + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/tests.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/tests.rs new file mode 100644 index 000000000000..9de9db1be6f0 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/map/tests.rs @@ -0,0 +1,830 @@ +use super::*; +use std::string::String; + +#[test] +fn it_works() { + let mut map = IndexMap::new(); + assert_eq!(map.is_empty(), true); + map.insert(1, ()); + map.insert(1, ()); + assert_eq!(map.len(), 1); + assert!(map.get(&1).is_some()); + assert_eq!(map.is_empty(), false); +} + +#[test] +fn new() { + let map = IndexMap::::new(); + println!("{:?}", map); + assert_eq!(map.capacity(), 0); + assert_eq!(map.len(), 0); + assert_eq!(map.is_empty(), true); +} + +#[test] +fn insert() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5]; + let not_present = [1, 3, 6, 9, 10]; + let mut map = IndexMap::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(map.len(), i); + map.insert(elt, elt); + assert_eq!(map.len(), i + 1); + assert_eq!(map.get(&elt), Some(&elt)); + assert_eq!(map[&elt], elt); + } + println!("{:?}", map); + + for &elt in ¬_present { + assert!(map.get(&elt).is_none()); + } +} + +#[test] +fn insert_full() { + let insert = vec![9, 2, 7, 1, 4, 6, 13]; + let present = vec![1, 6, 2]; + let mut map = IndexMap::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(map.len(), i); + let (index, existing) = map.insert_full(elt, elt); + assert_eq!(existing, None); + assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); + assert_eq!(map.len(), i + 1); + } + + let len = map.len(); + for &elt in &present { + let (index, existing) = map.insert_full(elt, elt); + assert_eq!(existing, Some(elt)); + assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); + assert_eq!(map.len(), len); + } +} + +#[test] +fn insert_2() { + let mut map = IndexMap::with_capacity(16); + + let mut keys = vec![]; + keys.extend(0..16); + keys.extend(if cfg!(miri) { 32..64 } else { 128..267 }); + + for &i in &keys { + let old_map = map.clone(); + map.insert(i, ()); + for key in old_map.keys() { + if map.get(key).is_none() { + println!("old_map: {:?}", old_map); + println!("map: {:?}", map); + panic!("did not find {} in map", key); + } + } + } + + for &i in &keys { + assert!(map.get(&i).is_some(), "did not find {}", i); + } +} + +#[test] +fn insert_order() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, ()); + } + + assert_eq!(map.keys().count(), map.len()); + assert_eq!(map.keys().count(), insert.len()); + for (a, b) in insert.iter().zip(map.keys()) { + assert_eq!(a, b); + } + for (i, k) in (0..insert.len()).zip(map.keys()) { + assert_eq!(map.get_index(i).unwrap().0, k); + } +} + +#[test] +fn shift_insert() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.shift_insert(0, elt, ()); + } + + assert_eq!(map.keys().count(), map.len()); + assert_eq!(map.keys().count(), insert.len()); + for (a, b) in insert.iter().rev().zip(map.keys()) { + assert_eq!(a, b); + } + for (i, k) in (0..insert.len()).zip(map.keys()) { + assert_eq!(map.get_index(i).unwrap().0, k); + } + + // "insert" that moves an existing entry + map.shift_insert(0, insert[0], ()); + assert_eq!(map.keys().count(), insert.len()); + assert_eq!(insert[0], map.keys()[0]); + for (a, b) in 
insert[1..].iter().rev().zip(map.keys().skip(1)) { + assert_eq!(a, b); + } +} + +#[test] +fn insert_sorted_bad() { + let mut map = IndexMap::new(); + map.insert(10, ()); + for i in 0..10 { + map.insert(i, ()); + } + + // The binary search will want to insert this at the end (index == len()), + // but that's only possible for *new* inserts. It should still be handled + // without panicking though, and in this case it's simple enough that we + // know the exact result. (But don't read this as an API guarantee!) + assert_eq!(map.first(), Some((&10, &()))); + map.insert_sorted(10, ()); + assert_eq!(map.last(), Some((&10, &()))); + assert!(map.keys().copied().eq(0..=10)); + + // Other out-of-order entries can also "insert" to a binary-searched + // position, moving in either direction. + map.move_index(5, 0); + map.move_index(6, 10); + assert_eq!(map.first(), Some((&5, &()))); + assert_eq!(map.last(), Some((&6, &()))); + map.insert_sorted(5, ()); // moves back up + map.insert_sorted(6, ()); // moves back down + assert!(map.keys().copied().eq(0..=10)); +} + +#[test] +fn grow() { + let insert = [0, 4, 2, 12, 8, 7, 11]; + let not_present = [1, 3, 6, 9, 10]; + let mut map = IndexMap::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(map.len(), i); + map.insert(elt, elt); + assert_eq!(map.len(), i + 1); + assert_eq!(map.get(&elt), Some(&elt)); + assert_eq!(map[&elt], elt); + } + + println!("{:?}", map); + for &elt in &insert { + map.insert(elt * 10, elt); + } + for &elt in &insert { + map.insert(elt * 100, elt); + } + for (i, &elt) in insert.iter().cycle().enumerate().take(100) { + map.insert(elt * 100 + i as i32, elt); + } + println!("{:?}", map); + for &elt in ¬_present { + assert!(map.get(&elt).is_none()); + } +} + +#[test] +fn reserve() { + let mut map = IndexMap::::new(); + assert_eq!(map.capacity(), 0); + map.reserve(100); + let capacity = map.capacity(); + assert!(capacity >= 100); + for i in 0..capacity { + assert_eq!(map.len(), i); + map.insert(i, i * i); + assert_eq!(map.len(), i + 1); + assert_eq!(map.capacity(), capacity); + assert_eq!(map.get(&i), Some(&(i * i))); + } + map.insert(capacity, std::usize::MAX); + assert_eq!(map.len(), capacity + 1); + assert!(map.capacity() > capacity); + assert_eq!(map.get(&capacity), Some(&std::usize::MAX)); +} + +#[test] +fn try_reserve() { + let mut map = IndexMap::::new(); + assert_eq!(map.capacity(), 0); + assert_eq!(map.try_reserve(100), Ok(())); + assert!(map.capacity() >= 100); + assert!(map.try_reserve(usize::MAX).is_err()); +} + +#[test] +fn shrink_to_fit() { + let mut map = IndexMap::::new(); + assert_eq!(map.capacity(), 0); + for i in 0..100 { + assert_eq!(map.len(), i); + map.insert(i, i * i); + assert_eq!(map.len(), i + 1); + assert!(map.capacity() >= i + 1); + assert_eq!(map.get(&i), Some(&(i * i))); + map.shrink_to_fit(); + assert_eq!(map.len(), i + 1); + assert_eq!(map.capacity(), i + 1); + assert_eq!(map.get(&i), Some(&(i * i))); + } +} + +#[test] +fn remove() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, elt); + } + + assert_eq!(map.keys().count(), map.len()); + assert_eq!(map.keys().count(), insert.len()); + for (a, b) in insert.iter().zip(map.keys()) { + assert_eq!(a, b); + } + + let remove_fail = [99, 77]; + let remove = [4, 12, 8, 7]; + + for &key in &remove_fail { + assert!(map.swap_remove_full(&key).is_none()); + } + println!("{:?}", map); + for &key in &remove { + //println!("{:?}", map); + let 
index = map.get_full(&key).unwrap().0; + assert_eq!(map.swap_remove_full(&key), Some((index, key, key))); + } + println!("{:?}", map); + + for key in &insert { + assert_eq!(map.get(key).is_some(), !remove.contains(key)); + } + assert_eq!(map.len(), insert.len() - remove.len()); + assert_eq!(map.keys().count(), insert.len() - remove.len()); +} + +#[test] +fn remove_to_empty() { + let mut map = indexmap! { 0 => 0, 4 => 4, 5 => 5 }; + map.swap_remove(&5).unwrap(); + map.swap_remove(&4).unwrap(); + map.swap_remove(&0).unwrap(); + assert!(map.is_empty()); +} + +#[test] +fn swap_remove_index() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, elt * 2); + } + + let mut vector = insert.to_vec(); + let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; + + // check that the same swap remove sequence on vec and map + // have the same result. + for &rm in remove_sequence { + let out_vec = vector.swap_remove(rm); + let (out_map, _) = map.swap_remove_index(rm).unwrap(); + assert_eq!(out_vec, out_map); + } + assert_eq!(vector.len(), map.len()); + for (a, b) in vector.iter().zip(map.keys()) { + assert_eq!(a, b); + } +} + +#[test] +fn partial_eq_and_eq() { + let mut map_a = IndexMap::new(); + map_a.insert(1, "1"); + map_a.insert(2, "2"); + let mut map_b = map_a.clone(); + assert_eq!(map_a, map_b); + map_b.swap_remove(&1); + assert_ne!(map_a, map_b); + + let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect(); + assert_ne!(map_a, map_c); + assert_ne!(map_c, map_a); +} + +#[test] +fn extend() { + let mut map = IndexMap::new(); + map.extend(vec![(&1, &2), (&3, &4)]); + map.extend(vec![(5, 6)]); + assert_eq!( + map.into_iter().collect::>(), + vec![(1, 2), (3, 4), (5, 6)] + ); +} + +#[test] +fn entry() { + let mut map = IndexMap::new(); + + map.insert(1, "1"); + map.insert(2, "2"); + { + let e = map.entry(3); + assert_eq!(e.index(), 2); + let e = e.or_insert("3"); + assert_eq!(e, &"3"); + } + + let e = map.entry(2); + assert_eq!(e.index(), 1); + assert_eq!(e.key(), &2); + match e { + Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"), + Entry::Vacant(_) => panic!(), + } + assert_eq!(e.or_insert("4"), &"2"); +} + +#[test] +fn entry_and_modify() { + let mut map = IndexMap::new(); + + map.insert(1, "1"); + map.entry(1).and_modify(|x| *x = "2"); + assert_eq!(Some(&"2"), map.get(&1)); + + map.entry(2).and_modify(|x| *x = "doesn't exist"); + assert_eq!(None, map.get(&2)); +} + +#[test] +fn entry_or_default() { + let mut map = IndexMap::new(); + + #[derive(Debug, PartialEq)] + enum TestEnum { + DefaultValue, + NonDefaultValue, + } + + impl Default for TestEnum { + fn default() -> Self { + TestEnum::DefaultValue + } + } + + map.insert(1, TestEnum::NonDefaultValue); + assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default()); + + assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default()); +} + +#[test] +fn occupied_entry_key() { + // These keys match hash and equality, but their addresses are distinct. + let (k1, k2) = (&mut 1, &mut 1); + let k1_ptr = k1 as *const i32; + let k2_ptr = k2 as *const i32; + assert_ne!(k1_ptr, k2_ptr); + + let mut map = IndexMap::new(); + map.insert(k1, "value"); + match map.entry(k2) { + Entry::Occupied(ref e) => { + // `OccupiedEntry::key` should reference the key in the map, + // not the key that was used to find the entry. 
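The entry tests above are the best summary of the in-place update API; a short usage sketch follows (our own function and data, assuming the indexmap 2.x interface).

    // Illustrative sketch only; not taken from the vendored diff.
    fn entry_api_sketch() {
        use indexmap::map::Entry;
        use indexmap::IndexMap;

        let mut counts: IndexMap<&str, u32> = IndexMap::new();

        // Insert-or-update without a second lookup.
        for word in ["a", "b", "a"] {
            *counts.entry(word).or_insert(0) += 1;
        }
        assert_eq!(counts["a"], 2);

        // `and_modify` only touches existing entries; `or_default` fills gaps.
        counts.entry("a").and_modify(|n| *n += 10);
        counts.entry("c").or_default();
        assert_eq!(counts.get("c"), Some(&0));

        // Entries also report their position in insertion order.
        match counts.entry("b") {
            Entry::Occupied(e) => assert_eq!(e.index(), 1),
            Entry::Vacant(_) => unreachable!(),
        }
    }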
+ let ptr = *e.key() as *const i32; + assert_eq!(ptr, k1_ptr); + assert_ne!(ptr, k2_ptr); + } + Entry::Vacant(_) => panic!(), + } +} + +#[test] +fn get_index_entry() { + let mut map = IndexMap::new(); + + assert!(map.get_index_entry(0).is_none()); + assert!(map.first_entry().is_none()); + assert!(map.last_entry().is_none()); + + map.insert(0, "0"); + map.insert(1, "1"); + map.insert(2, "2"); + map.insert(3, "3"); + + assert!(map.get_index_entry(4).is_none()); + + { + let e = map.get_index_entry(1).unwrap(); + assert_eq!(*e.key(), 1); + assert_eq!(*e.get(), "1"); + assert_eq!(e.swap_remove(), "1"); + } + + { + let mut e = map.get_index_entry(1).unwrap(); + assert_eq!(*e.key(), 3); + assert_eq!(*e.get(), "3"); + assert_eq!(e.insert("4"), "3"); + } + + assert_eq!(*map.get(&3).unwrap(), "4"); + + { + let e = map.first_entry().unwrap(); + assert_eq!(*e.key(), 0); + assert_eq!(*e.get(), "0"); + } + + { + let e = map.last_entry().unwrap(); + assert_eq!(*e.key(), 2); + assert_eq!(*e.get(), "2"); + } +} + +#[test] +fn from_entries() { + let mut map = IndexMap::from([(1, "1"), (2, "2"), (3, "3")]); + + { + let e = match map.entry(1) { + Entry::Occupied(e) => IndexedEntry::from(e), + Entry::Vacant(_) => panic!(), + }; + assert_eq!(e.index(), 0); + assert_eq!(*e.key(), 1); + assert_eq!(*e.get(), "1"); + } + + { + let e = match map.get_index_entry(1) { + Some(e) => OccupiedEntry::from(e), + None => panic!(), + }; + assert_eq!(e.index(), 1); + assert_eq!(*e.key(), 2); + assert_eq!(*e.get(), "2"); + } +} + +#[test] +fn keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_iter().collect(); + let keys: Vec<_> = map.keys().copied().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); +} + +#[test] +fn into_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_iter().collect(); + let keys: Vec = map.into_keys().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); +} + +#[test] +fn values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_iter().collect(); + let values: Vec<_> = map.values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); +} + +#[test] +fn values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + let mut map: IndexMap<_, _> = vec.into_iter().collect(); + for value in map.values_mut() { + *value *= 2 + } + let values: Vec<_> = map.values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); +} + +#[test] +fn into_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_iter().collect(); + let values: Vec = map.into_values().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); +} + +#[test] +fn drain_range() { + // Test the various heuristics of `erase_indices` + for range in [ + 0..0, // nothing erased + 10..90, // reinsert the few kept (..10 and 90..) + 80..90, // update the few to adjust (80..) 
+ 20..30, // sweep everything + ] { + let mut vec = Vec::from_iter(0..100); + let mut map: IndexMap = (0..100).map(|i| (i, ())).collect(); + drop(vec.drain(range.clone())); + drop(map.drain(range)); + assert!(vec.iter().eq(map.keys())); + for (i, x) in vec.iter().enumerate() { + assert_eq!(map.get_index_of(x), Some(i)); + } + } +} + +#[test] +#[cfg(feature = "std")] +fn from_array() { + let map = IndexMap::from([(1, 2), (3, 4)]); + let mut expected = IndexMap::new(); + expected.insert(1, 2); + expected.insert(3, 4); + + assert_eq!(map, expected) +} + +#[test] +fn iter_default() { + struct K; + struct V; + fn assert_default() + where + T: Default + Iterator, + { + assert!(T::default().next().is_none()); + } + assert_default::>(); + assert_default::>(); + assert_default::>(); + assert_default::>(); + assert_default::>(); + assert_default::>(); + assert_default::>(); + assert_default::>(); + assert_default::>(); +} + +#[test] +fn test_binary_search_by() { + // adapted from std's test for binary_search + let b: IndexMap<_, i32> = [] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(0)); + + let b: IndexMap<_, i32> = [4] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&3)), Err(0)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&4)), Ok(0)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(1)); + + let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(3)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Ok(3)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Err(4)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Ok(4)); + + let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&9)), Err(6)); + + let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Ok(3)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(3)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Ok(5)); + + let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Err(5)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&0)), Err(0)); + + let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&0)), Err(0)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&1)), Ok(0)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&2)), Err(1)); + assert!(match b.binary_search_by(|_, x| x.cmp(&3)) { + Ok(1..=3) => true, + _ => false, + }); + assert!(match b.binary_search_by(|_, x| x.cmp(&3)) { + Ok(1..=3) => true, + _ => false, + }); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&4)), Err(4)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(4)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Err(4)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Ok(4)); + assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Err(5)); +} + +#[test] +fn test_binary_search_by_key() { + // adapted from std's test for binary_search + let b: IndexMap<_, i32> = [] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + 
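The `drain_range` test above exercises several internal heuristics; from the caller's side the behaviour is simply that a contiguous index range is removed while the remaining entries keep their order. A minimal sketch (our own example, assuming the indexmap 2.x API):

    // Illustrative sketch only; not taken from the vendored diff.
    fn drain_range_sketch() {
        use indexmap::IndexMap;

        let mut map: IndexMap<i32, ()> = (0..10).map(|i| (i, ())).collect();

        // Remove the entries at positions 2..5 (keys 2, 3, 4).
        let removed: Vec<i32> = map.drain(2..5).map(|(k, ())| k).collect();
        assert_eq!(removed, vec![2, 3, 4]);

        // Survivors keep their relative order, and index lookups stay consistent.
        assert!(map.keys().copied().eq([0, 1, 5, 6, 7, 8, 9]));
        assert_eq!(map.get_index_of(&5), Some(2));
    }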
assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(0)); + + let b: IndexMap<_, i32> = [4] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by_key(&3, |_, &x| x), Err(0)); + assert_eq!(b.binary_search_by_key(&4, |_, &x| x), Ok(0)); + assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(1)); + + let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(3)); + assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Ok(3)); + assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Err(4)); + assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Ok(4)); + + let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by_key(&9, |_, &x| x), Err(6)); + + let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Ok(3)); + assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(3)); + assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Ok(5)); + + let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Err(5)); + assert_eq!(b.binary_search_by_key(&0, |_, &x| x), Err(0)); + + let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.binary_search_by_key(&0, |_, &x| x), Err(0)); + assert_eq!(b.binary_search_by_key(&1, |_, &x| x), Ok(0)); + assert_eq!(b.binary_search_by_key(&2, |_, &x| x), Err(1)); + assert!(match b.binary_search_by_key(&3, |_, &x| x) { + Ok(1..=3) => true, + _ => false, + }); + assert!(match b.binary_search_by_key(&3, |_, &x| x) { + Ok(1..=3) => true, + _ => false, + }); + assert_eq!(b.binary_search_by_key(&4, |_, &x| x), Err(4)); + assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(4)); + assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Err(4)); + assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Ok(4)); + assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Err(5)); +} + +#[test] +fn test_partition_point() { + // adapted from std's test for partition_point + let b: IndexMap<_, i32> = [] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.partition_point(|_, &x| x < 5), 0); + + let b: IndexMap<_, i32> = [4] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.partition_point(|_, &x| x < 3), 0); + assert_eq!(b.partition_point(|_, &x| x < 4), 0); + assert_eq!(b.partition_point(|_, &x| x < 5), 1); + + let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.partition_point(|_, &x| x < 5), 3); + assert_eq!(b.partition_point(|_, &x| x < 6), 3); + assert_eq!(b.partition_point(|_, &x| x < 7), 4); + assert_eq!(b.partition_point(|_, &x| x < 8), 4); + + let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.partition_point(|_, &x| x < 9), 6); + + let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.partition_point(|_, &x| x < 6), 3); + assert_eq!(b.partition_point(|_, &x| x < 5), 3); + assert_eq!(b.partition_point(|_, &x| x < 8), 5); + + let b: 
IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.partition_point(|_, &x| x < 7), 5); + assert_eq!(b.partition_point(|_, &x| x < 0), 0); + + let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] + .into_iter() + .enumerate() + .map(|(i, x)| (i + 100, x)) + .collect(); + assert_eq!(b.partition_point(|_, &x| x < 0), 0); + assert_eq!(b.partition_point(|_, &x| x < 1), 0); + assert_eq!(b.partition_point(|_, &x| x < 2), 1); + assert_eq!(b.partition_point(|_, &x| x < 3), 1); + assert_eq!(b.partition_point(|_, &x| x < 4), 4); + assert_eq!(b.partition_point(|_, &x| x < 5), 4); + assert_eq!(b.partition_point(|_, &x| x < 6), 4); + assert_eq!(b.partition_point(|_, &x| x < 7), 4); + assert_eq!(b.partition_point(|_, &x| x < 8), 5); +} + +macro_rules! move_index_oob { + ($test:ident, $from:expr, $to:expr) => { + #[test] + #[should_panic(expected = "index out of bounds")] + fn $test() { + let mut map: IndexMap = (0..10).map(|k| (k, ())).collect(); + map.move_index($from, $to); + } + }; +} +move_index_oob!(test_move_index_out_of_bounds_0_10, 0, 10); +move_index_oob!(test_move_index_out_of_bounds_0_max, 0, usize::MAX); +move_index_oob!(test_move_index_out_of_bounds_10_0, 10, 0); +move_index_oob!(test_move_index_out_of_bounds_max_0, usize::MAX, 0); diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/map.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/map.rs new file mode 100644 index 000000000000..8236cf70f076 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/map.rs @@ -0,0 +1,663 @@ +//! Parallel iterator types for [`IndexMap`] with [`rayon`][::rayon]. +//! +//! You will rarely need to interact with this module directly unless you need to name one of the +//! iterator types. + +use super::collect; +use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; +use rayon::prelude::*; + +use crate::vec::Vec; +use alloc::boxed::Box; +use core::cmp::Ordering; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::ops::RangeBounds; + +use crate::map::Slice; +use crate::Bucket; +use crate::Entries; +use crate::IndexMap; + +impl IntoParallelIterator for IndexMap +where + K: Send, + V: Send, +{ + type Item = (K, V); + type Iter = IntoParIter; + + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + entries: self.into_entries(), + } + } +} + +impl IntoParallelIterator for Box> +where + K: Send, + V: Send, +{ + type Item = (K, V); + type Iter = IntoParIter; + + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + entries: self.into_entries(), + } + } +} + +/// A parallel owning iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::into_par_iter`] method +/// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more. 
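A minimal sketch of the owning parallel iterator added here; this is our own example and assumes the crate is built with the `rayon` feature, with rayon's prelude in scope. As the docs above note, ordered operations such as `collect` still see the map's insertion order.

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn into_par_iter_sketch() {
        use indexmap::IndexMap;
        use rayon::prelude::*;

        let map: IndexMap<u32, u32> = (0..4).map(|i| (i, i * 10)).collect();

        let pairs: Vec<(u32, u32)> = map.into_par_iter().collect();
        assert_eq!(pairs, vec![(0, 0), (1, 10), (2, 20), (3, 30)]);
    }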
+pub struct IntoParIter { + entries: Vec>, +} + +impl fmt::Debug for IntoParIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.entries.iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +impl ParallelIterator for IntoParIter { + type Item = (K, V); + + parallel_iterator_methods!(Bucket::key_value); +} + +impl IndexedParallelIterator for IntoParIter { + indexed_parallel_iterator_methods!(Bucket::key_value); +} + +impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap +where + K: Sync, + V: Sync, +{ + type Item = (&'a K, &'a V); + type Iter = ParIter<'a, K, V>; + + fn into_par_iter(self) -> Self::Iter { + ParIter { + entries: self.as_entries(), + } + } +} + +impl<'a, K, V> IntoParallelIterator for &'a Slice +where + K: Sync, + V: Sync, +{ + type Item = (&'a K, &'a V); + type Iter = ParIter<'a, K, V>; + + fn into_par_iter(self) -> Self::Iter { + ParIter { + entries: &self.entries, + } + } +} + +/// A parallel iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::par_iter`] method +/// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more. +/// +/// [`IndexMap::par_iter`]: ../struct.IndexMap.html#method.par_iter +pub struct ParIter<'a, K, V> { + entries: &'a [Bucket], +} + +impl Clone for ParIter<'_, K, V> { + fn clone(&self) -> Self { + ParIter { ..*self } + } +} + +impl fmt::Debug for ParIter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.entries.iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { + type Item = (&'a K, &'a V); + + parallel_iterator_methods!(Bucket::refs); +} + +impl IndexedParallelIterator for ParIter<'_, K, V> { + indexed_parallel_iterator_methods!(Bucket::refs); +} + +impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap +where + K: Sync + Send, + V: Send, +{ + type Item = (&'a K, &'a mut V); + type Iter = ParIterMut<'a, K, V>; + + fn into_par_iter(self) -> Self::Iter { + ParIterMut { + entries: self.as_entries_mut(), + } + } +} + +impl<'a, K, V> IntoParallelIterator for &'a mut Slice +where + K: Sync + Send, + V: Send, +{ + type Item = (&'a K, &'a mut V); + type Iter = ParIterMut<'a, K, V>; + + fn into_par_iter(self) -> Self::Iter { + ParIterMut { + entries: &mut self.entries, + } + } +} + +/// A parallel mutable iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::par_iter_mut`] method +/// (provided by rayon's [`IntoParallelRefMutIterator`] trait). See its documentation for more. 
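`par_iter` and `par_iter_mut` mirror their sequential counterparts, yielding `(&K, &V)` and `(&K, &mut V)` respectively. A short sketch (our own example, `rayon` feature assumed):

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn par_iter_sketch() {
        use indexmap::IndexMap;
        use rayon::prelude::*;

        let mut map: IndexMap<u32, u64> = (0..100).map(|i| (i, u64::from(i))).collect();

        let sum: u64 = map.par_iter().map(|(_, &v)| v).sum();
        assert_eq!(sum, 4950);

        map.par_iter_mut().for_each(|(_, v)| *v *= 2);
        assert_eq!(map[&3u32], 6);
    }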
+/// +/// [`IndexMap::par_iter_mut`]: ../struct.IndexMap.html#method.par_iter_mut +pub struct ParIterMut<'a, K, V> { + entries: &'a mut [Bucket], +} + +impl fmt::Debug for ParIterMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.entries.iter().map(Bucket::refs); + f.debug_list().entries(iter).finish() + } +} + +impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + parallel_iterator_methods!(Bucket::ref_mut); +} + +impl IndexedParallelIterator for ParIterMut<'_, K, V> { + indexed_parallel_iterator_methods!(Bucket::ref_mut); +} + +impl<'a, K, V, S> ParallelDrainRange for &'a mut IndexMap +where + K: Send, + V: Send, +{ + type Item = (K, V); + type Iter = ParDrain<'a, K, V>; + + fn par_drain>(self, range: R) -> Self::Iter { + ParDrain { + entries: self.core.par_drain(range), + } + } +} + +/// A parallel draining iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::par_drain`] method +/// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more. +/// +/// [`IndexMap::par_drain`]: ../struct.IndexMap.html#method.par_drain +pub struct ParDrain<'a, K: Send, V: Send> { + entries: rayon::vec::Drain<'a, Bucket>, +} + +impl ParallelIterator for ParDrain<'_, K, V> { + type Item = (K, V); + + parallel_iterator_methods!(Bucket::key_value); +} + +impl IndexedParallelIterator for ParDrain<'_, K, V> { + indexed_parallel_iterator_methods!(Bucket::key_value); +} + +/// Parallel iterator methods and other parallel methods. +/// +/// The following methods **require crate feature `"rayon"`**. +/// +/// See also the `IntoParallelIterator` implementations. +impl IndexMap +where + K: Sync, + V: Sync, +{ + /// Return a parallel iterator over the keys of the map. + /// + /// While parallel iterators can process items in any order, their relative order + /// in the map is still preserved for operations like `reduce` and `collect`. + pub fn par_keys(&self) -> ParKeys<'_, K, V> { + ParKeys { + entries: self.as_entries(), + } + } + + /// Return a parallel iterator over the values of the map. + /// + /// While parallel iterators can process items in any order, their relative order + /// in the map is still preserved for operations like `reduce` and `collect`. + pub fn par_values(&self) -> ParValues<'_, K, V> { + ParValues { + entries: self.as_entries(), + } + } +} + +/// Parallel iterator methods and other parallel methods. +/// +/// The following methods **require crate feature `"rayon"`**. +/// +/// See also the `IntoParallelIterator` implementations. +impl Slice +where + K: Sync, + V: Sync, +{ + /// Return a parallel iterator over the keys of the map slice. + /// + /// While parallel iterators can process items in any order, their relative order + /// in the slice is still preserved for operations like `reduce` and `collect`. + pub fn par_keys(&self) -> ParKeys<'_, K, V> { + ParKeys { + entries: &self.entries, + } + } + + /// Return a parallel iterator over the values of the map slice. + /// + /// While parallel iterators can process items in any order, their relative order + /// in the slice is still preserved for operations like `reduce` and `collect`. 
+ pub fn par_values(&self) -> ParValues<'_, K, V> { + ParValues { + entries: &self.entries, + } + } +} + +impl IndexMap +where + K: Hash + Eq + Sync, + V: Sync, + S: BuildHasher, +{ + /// Returns `true` if `self` contains all of the same key-value pairs as `other`, + /// regardless of each map's indexed order, determined in parallel. + pub fn par_eq(&self, other: &IndexMap) -> bool + where + V: PartialEq, + V2: Sync, + S2: BuildHasher + Sync, + { + self.len() == other.len() + && self + .par_iter() + .all(move |(key, value)| other.get(key).map_or(false, |v| *value == *v)) + } +} + +/// A parallel iterator over the keys of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::par_keys`] method. +/// See its documentation for more. +pub struct ParKeys<'a, K, V> { + entries: &'a [Bucket], +} + +impl Clone for ParKeys<'_, K, V> { + fn clone(&self) -> Self { + ParKeys { ..*self } + } +} + +impl fmt::Debug for ParKeys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.entries.iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> { + type Item = &'a K; + + parallel_iterator_methods!(Bucket::key_ref); +} + +impl IndexedParallelIterator for ParKeys<'_, K, V> { + indexed_parallel_iterator_methods!(Bucket::key_ref); +} + +/// A parallel iterator over the values of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::par_values`] method. +/// See its documentation for more. +pub struct ParValues<'a, K, V> { + entries: &'a [Bucket], +} + +impl Clone for ParValues<'_, K, V> { + fn clone(&self) -> Self { + ParValues { ..*self } + } +} + +impl fmt::Debug for ParValues<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.entries.iter().map(Bucket::value_ref); + f.debug_list().entries(iter).finish() + } +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> { + type Item = &'a V; + + parallel_iterator_methods!(Bucket::value_ref); +} + +impl IndexedParallelIterator for ParValues<'_, K, V> { + indexed_parallel_iterator_methods!(Bucket::value_ref); +} + +impl IndexMap +where + K: Send, + V: Send, +{ + /// Return a parallel iterator over mutable references to the values of the map + /// + /// While parallel iterators can process items in any order, their relative order + /// in the map is still preserved for operations like `reduce` and `collect`. + pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { + ParValuesMut { + entries: self.as_entries_mut(), + } + } +} + +impl Slice +where + K: Send, + V: Send, +{ + /// Return a parallel iterator over mutable references to the the values of the map slice. + /// + /// While parallel iterators can process items in any order, their relative order + /// in the slice is still preserved for operations like `reduce` and `collect`. + pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { + ParValuesMut { + entries: &mut self.entries, + } + } +} + +impl IndexMap +where + K: Send, + V: Send, +{ + /// Sort the map’s key-value pairs in parallel, by the default ordering of the keys. + pub fn par_sort_keys(&mut self) + where + K: Ord, + { + self.with_entries(|entries| { + entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key)); + }); + } + + /// Sort the map’s key-value pairs in place and in parallel, using the comparison + /// function `cmp`. 
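`par_eq` gives the same order-insensitive comparison as the sequential `==`, just evaluated in parallel. A minimal sketch (our own example, `rayon` feature assumed):

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn par_eq_sketch() {
        use indexmap::IndexMap;

        let a: IndexMap<u32, &str> = [(1, "one"), (2, "two")].into_iter().collect();
        let b: IndexMap<u32, &str> = [(2, "two"), (1, "one")].into_iter().collect();

        // Same pairs, different insertion order: still considered equal.
        assert!(a.par_eq(&b));

        let mut c = b.clone();
        c.insert(3, "three");
        assert!(!a.par_eq(&c));
    }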
+ /// + /// The comparison function receives two key and value pairs to compare (you + /// can sort by keys or values or their combination as needed). + pub fn par_sort_by(&mut self, cmp: F) + where + F: Fn(&K, &V, &K, &V) -> Ordering + Sync, + { + self.with_entries(|entries| { + entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + }); + } + + /// Sort the key-value pairs of the map in parallel and return a by-value parallel + /// iterator of the key-value pairs with the result. + pub fn par_sorted_by(self, cmp: F) -> IntoParIter + where + F: Fn(&K, &V, &K, &V) -> Ordering + Sync, + { + let mut entries = self.into_entries(); + entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + IntoParIter { entries } + } + + /// Sort the map's key-value pairs in parallel, by the default ordering of the keys. + pub fn par_sort_unstable_keys(&mut self) + where + K: Ord, + { + self.with_entries(|entries| { + entries.par_sort_unstable_by(|a, b| K::cmp(&a.key, &b.key)); + }); + } + + /// Sort the map's key-value pairs in place and in parallel, using the comparison + /// function `cmp`. + /// + /// The comparison function receives two key and value pairs to compare (you + /// can sort by keys or values or their combination as needed). + pub fn par_sort_unstable_by(&mut self, cmp: F) + where + F: Fn(&K, &V, &K, &V) -> Ordering + Sync, + { + self.with_entries(|entries| { + entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + }); + } + + /// Sort the key-value pairs of the map in parallel and return a by-value parallel + /// iterator of the key-value pairs with the result. + pub fn par_sorted_unstable_by(self, cmp: F) -> IntoParIter + where + F: Fn(&K, &V, &K, &V) -> Ordering + Sync, + { + let mut entries = self.into_entries(); + entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + IntoParIter { entries } + } + + /// Sort the map’s key-value pairs in place and in parallel, using a sort-key extraction + /// function. + pub fn par_sort_by_cached_key(&mut self, sort_key: F) + where + T: Ord + Send, + F: Fn(&K, &V) -> T + Sync, + { + self.with_entries(move |entries| { + entries.par_sort_by_cached_key(move |a| sort_key(&a.key, &a.value)); + }); + } +} + +/// A parallel mutable iterator over the values of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::par_values_mut`] method. +/// See its documentation for more. 
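The parallel sorting family reorders entries in place (or by value with `par_sorted_by`) without invalidating hash lookups, matching the sequential `sort_*` methods. A sketch (our own example, `rayon` feature assumed; the comparator argument names are ours):

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn par_sort_map_sketch() {
        use indexmap::IndexMap;

        let mut map: IndexMap<&str, u32> =
            [("b", 2), ("c", 3), ("a", 1)].into_iter().collect();

        // Stable parallel sort by key.
        map.par_sort_keys();
        assert!(map.keys().copied().eq(["a", "b", "c"]));

        // Sort by value, descending, with a custom comparator.
        map.par_sort_by(|_, v1, _, v2| v2.cmp(v1));
        assert!(map.values().copied().eq([3, 2, 1]));

        // For expensive sort keys, compute each key only once.
        map.par_sort_by_cached_key(|k, _| k.len());
        assert_eq!(map.len(), 3);
    }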
+pub struct ParValuesMut<'a, K, V> { + entries: &'a mut [Bucket], +} + +impl fmt::Debug for ParValuesMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.entries.iter().map(Bucket::value_ref); + f.debug_list().entries(iter).finish() + } +} + +impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> { + type Item = &'a mut V; + + parallel_iterator_methods!(Bucket::value_mut); +} + +impl IndexedParallelIterator for ParValuesMut<'_, K, V> { + indexed_parallel_iterator_methods!(Bucket::value_mut); +} + +impl FromParallelIterator<(K, V)> for IndexMap +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher + Default + Send, +{ + fn from_par_iter(iter: I) -> Self + where + I: IntoParallelIterator, + { + let list = collect(iter); + let len = list.iter().map(Vec::len).sum(); + let mut map = Self::with_capacity_and_hasher(len, S::default()); + for vec in list { + map.extend(vec); + } + map + } +} + +impl ParallelExtend<(K, V)> for IndexMap +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher + Send, +{ + fn par_extend(&mut self, iter: I) + where + I: IntoParallelIterator, + { + for vec in collect(iter) { + self.extend(vec); + } + } +} + +impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap +where + K: Copy + Eq + Hash + Send + Sync, + V: Copy + Send + Sync, + S: BuildHasher + Send, +{ + fn par_extend(&mut self, iter: I) + where + I: IntoParallelIterator, + { + for vec in collect(iter) { + self.extend(vec); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::string::String; + + #[test] + fn insert_order() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut map = IndexMap::new(); + + for &elt in &insert { + map.insert(elt, ()); + } + + assert_eq!(map.par_keys().count(), map.len()); + assert_eq!(map.par_keys().count(), insert.len()); + insert.par_iter().zip(map.par_keys()).for_each(|(a, b)| { + assert_eq!(a, b); + }); + (0..insert.len()) + .into_par_iter() + .zip(map.par_keys()) + .for_each(|(i, k)| { + assert_eq!(map.get_index(i).unwrap().0, k); + }); + } + + #[test] + fn partial_eq_and_eq() { + let mut map_a = IndexMap::new(); + map_a.insert(1, "1"); + map_a.insert(2, "2"); + let mut map_b = map_a.clone(); + assert!(map_a.par_eq(&map_b)); + map_b.swap_remove(&1); + assert!(!map_a.par_eq(&map_b)); + map_b.insert(3, "3"); + assert!(!map_a.par_eq(&map_b)); + + let map_c: IndexMap<_, String> = + map_b.into_par_iter().map(|(k, v)| (k, v.into())).collect(); + assert!(!map_a.par_eq(&map_c)); + assert!(!map_c.par_eq(&map_a)); + } + + #[test] + fn extend() { + let mut map = IndexMap::new(); + map.par_extend(vec![(&1, &2), (&3, &4)]); + map.par_extend(vec![(5, 6)]); + assert_eq!( + map.into_par_iter().collect::>(), + vec![(1, 2), (3, 4), (5, 6)] + ); + } + + #[test] + fn keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_par_iter().collect(); + let keys: Vec<_> = map.par_keys().copied().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: IndexMap<_, _> = vec.into_par_iter().collect(); + let values: Vec<_> = map.par_values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + let mut map: IndexMap<_, _> = 
vec.into_par_iter().collect(); + map.par_values_mut().for_each(|value| *value *= 2); + let values: Vec<_> = map.par_values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/mod.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/mod.rs new file mode 100644 index 000000000000..1d21569c1894 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/mod.rs @@ -0,0 +1,29 @@ +#![cfg_attr(docsrs, doc(cfg(feature = "rayon")))] + +use rayon::prelude::*; + +use alloc::collections::LinkedList; + +use crate::vec::Vec; + +pub mod map; +pub mod set; + +// This form of intermediate collection is also how Rayon collects `HashMap`. +// Note that the order will also be preserved! +fn collect(iter: I) -> LinkedList> { + iter.into_par_iter() + .fold(Vec::new, |mut vec, elem| { + vec.push(elem); + vec + }) + .map(|vec| { + let mut list = LinkedList::new(); + list.push_back(vec); + list + }) + .reduce(LinkedList::new, |mut list1, mut list2| { + list1.append(&mut list2); + list1 + }) +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/set.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/set.rs new file mode 100644 index 000000000000..3904234b20b7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rayon/set.rs @@ -0,0 +1,756 @@ +//! Parallel iterator types for [`IndexSet`] with [rayon][::rayon]. +//! +//! You will rarely need to interact with this module directly unless you need to name one of the +//! iterator types. + +use super::collect; +use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; +use rayon::prelude::*; + +use crate::vec::Vec; +use alloc::boxed::Box; +use core::cmp::Ordering; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::ops::RangeBounds; + +use crate::set::Slice; +use crate::Entries; +use crate::IndexSet; + +type Bucket = crate::Bucket; + +impl IntoParallelIterator for IndexSet +where + T: Send, +{ + type Item = T; + type Iter = IntoParIter; + + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + entries: self.into_entries(), + } + } +} + +impl IntoParallelIterator for Box> +where + T: Send, +{ + type Item = T; + type Iter = IntoParIter; + + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + entries: self.into_entries(), + } + } +} + +/// A parallel owning iterator over the items of an [`IndexSet`]. +/// +/// This `struct` is created by the [`IndexSet::into_par_iter`] method +/// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more. 
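The order-preserving `collect` helper above is what lets `FromParallelIterator` and `ParallelExtend` keep the input sequence. A sketch of both from the caller's side (our own example, `rayon` feature assumed):

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn par_collect_map_sketch() {
        use indexmap::IndexMap;
        use rayon::prelude::*;

        let mut map: IndexMap<u32, u32> =
            (0..1_000u32).into_par_iter().map(|i| (i, i * i)).collect();
        assert_eq!(map.len(), 1_000);
        assert_eq!(map.get_index(10), Some((&10, &100)));

        // `par_extend` appends more pairs, again preserving order.
        map.par_extend((1_000..1_010u32).into_par_iter().map(|i| (i, i * i)));
        assert_eq!(map.last(), Some((&1_009, &1_018_081)));
    }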
+pub struct IntoParIter { + entries: Vec>, +} + +impl fmt::Debug for IntoParIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.entries.iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +impl ParallelIterator for IntoParIter { + type Item = T; + + parallel_iterator_methods!(Bucket::key); +} + +impl IndexedParallelIterator for IntoParIter { + indexed_parallel_iterator_methods!(Bucket::key); +} + +impl<'a, T, S> IntoParallelIterator for &'a IndexSet +where + T: Sync, +{ + type Item = &'a T; + type Iter = ParIter<'a, T>; + + fn into_par_iter(self) -> Self::Iter { + ParIter { + entries: self.as_entries(), + } + } +} + +impl<'a, T> IntoParallelIterator for &'a Slice +where + T: Sync, +{ + type Item = &'a T; + type Iter = ParIter<'a, T>; + + fn into_par_iter(self) -> Self::Iter { + ParIter { + entries: &self.entries, + } + } +} + +/// A parallel iterator over the items of an [`IndexSet`]. +/// +/// This `struct` is created by the [`IndexSet::par_iter`] method +/// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more. +/// +/// [`IndexSet::par_iter`]: ../struct.IndexSet.html#method.par_iter +pub struct ParIter<'a, T> { + entries: &'a [Bucket], +} + +impl Clone for ParIter<'_, T> { + fn clone(&self) -> Self { + ParIter { ..*self } + } +} + +impl fmt::Debug for ParIter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.entries.iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { + type Item = &'a T; + + parallel_iterator_methods!(Bucket::key_ref); +} + +impl IndexedParallelIterator for ParIter<'_, T> { + indexed_parallel_iterator_methods!(Bucket::key_ref); +} + +impl<'a, T, S> ParallelDrainRange for &'a mut IndexSet +where + T: Send, +{ + type Item = T; + type Iter = ParDrain<'a, T>; + + fn par_drain>(self, range: R) -> Self::Iter { + ParDrain { + entries: self.map.core.par_drain(range), + } + } +} + +/// A parallel draining iterator over the items of an [`IndexSet`]. +/// +/// This `struct` is created by the [`IndexSet::par_drain`] method +/// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more. +/// +/// [`IndexSet::par_drain`]: ../struct.IndexSet.html#method.par_drain +pub struct ParDrain<'a, T: Send> { + entries: rayon::vec::Drain<'a, Bucket>, +} + +impl ParallelIterator for ParDrain<'_, T> { + type Item = T; + + parallel_iterator_methods!(Bucket::key); +} + +impl IndexedParallelIterator for ParDrain<'_, T> { + indexed_parallel_iterator_methods!(Bucket::key); +} + +/// Parallel iterator methods and other parallel methods. +/// +/// The following methods **require crate feature `"rayon"`**. +/// +/// See also the `IntoParallelIterator` implementations. +impl IndexSet +where + T: Hash + Eq + Sync, + S: BuildHasher + Sync, +{ + /// Return a parallel iterator over the values that are in `self` but not `other`. + /// + /// While parallel iterators can process items in any order, their relative order + /// in the `self` set is still preserved for operations like `reduce` and `collect`. + pub fn par_difference<'a, S2>( + &'a self, + other: &'a IndexSet, + ) -> ParDifference<'a, T, S, S2> + where + S2: BuildHasher + Sync, + { + ParDifference { + set1: self, + set2: other, + } + } + + /// Return a parallel iterator over the values that are in `self` or `other`, + /// but not in both. 
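`par_difference` keeps the order of the `self` set, as the doc above states. A minimal sketch (our own example, `rayon` feature assumed):

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn par_difference_sketch() {
        use indexmap::IndexSet;
        use rayon::prelude::*;

        let a: IndexSet<u32> = [5, 3, 1, 4].into_iter().collect();
        let b: IndexSet<u32> = [4, 5].into_iter().collect();

        let diff: Vec<u32> = a.par_difference(&b).copied().collect();
        assert_eq!(diff, vec![3, 1]);
    }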
+ /// + /// While parallel iterators can process items in any order, their relative order + /// in the sets is still preserved for operations like `reduce` and `collect`. + /// Values from `self` are produced in their original order, followed by + /// values from `other` in their original order. + pub fn par_symmetric_difference<'a, S2>( + &'a self, + other: &'a IndexSet, + ) -> ParSymmetricDifference<'a, T, S, S2> + where + S2: BuildHasher + Sync, + { + ParSymmetricDifference { + set1: self, + set2: other, + } + } + + /// Return a parallel iterator over the values that are in both `self` and `other`. + /// + /// While parallel iterators can process items in any order, their relative order + /// in the `self` set is still preserved for operations like `reduce` and `collect`. + pub fn par_intersection<'a, S2>( + &'a self, + other: &'a IndexSet, + ) -> ParIntersection<'a, T, S, S2> + where + S2: BuildHasher + Sync, + { + ParIntersection { + set1: self, + set2: other, + } + } + + /// Return a parallel iterator over all values that are in `self` or `other`. + /// + /// While parallel iterators can process items in any order, their relative order + /// in the sets is still preserved for operations like `reduce` and `collect`. + /// Values from `self` are produced in their original order, followed by + /// values that are unique to `other` in their original order. + pub fn par_union<'a, S2>(&'a self, other: &'a IndexSet) -> ParUnion<'a, T, S, S2> + where + S2: BuildHasher + Sync, + { + ParUnion { + set1: self, + set2: other, + } + } + + /// Returns `true` if `self` contains all of the same values as `other`, + /// regardless of each set's indexed order, determined in parallel. + pub fn par_eq(&self, other: &IndexSet) -> bool + where + S2: BuildHasher + Sync, + { + self.len() == other.len() && self.par_is_subset(other) + } + + /// Returns `true` if `self` has no elements in common with `other`, + /// determined in parallel. + pub fn par_is_disjoint(&self, other: &IndexSet) -> bool + where + S2: BuildHasher + Sync, + { + if self.len() <= other.len() { + self.par_iter().all(move |value| !other.contains(value)) + } else { + other.par_iter().all(move |value| !self.contains(value)) + } + } + + /// Returns `true` if all elements of `other` are contained in `self`, + /// determined in parallel. + pub fn par_is_superset(&self, other: &IndexSet) -> bool + where + S2: BuildHasher + Sync, + { + other.par_is_subset(self) + } + + /// Returns `true` if all elements of `self` are contained in `other`, + /// determined in parallel. + pub fn par_is_subset(&self, other: &IndexSet) -> bool + where + S2: BuildHasher + Sync, + { + self.len() <= other.len() && self.par_iter().all(move |value| other.contains(value)) + } +} + +/// A parallel iterator producing elements in the difference of [`IndexSet`]s. +/// +/// This `struct` is created by the [`IndexSet::par_difference`] method. +/// See its documentation for more. 
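The parallel membership predicates only compare contents, never order, mirroring their sequential counterparts. A sketch (our own example, `rayon` feature assumed):

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn par_set_predicates_sketch() {
        use indexmap::IndexSet;

        let small: IndexSet<u32> = (0..3).collect();
        let big: IndexSet<u32> = (0..6).rev().collect();
        let other: IndexSet<u32> = (10..13).collect();

        assert!(small.par_is_subset(&big));
        assert!(big.par_is_superset(&small));
        assert!(small.par_is_disjoint(&other));
        assert!(!small.par_eq(&big)); // different lengths, so not equal
    }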
+pub struct ParDifference<'a, T, S1, S2> { + set1: &'a IndexSet, + set2: &'a IndexSet, +} + +impl Clone for ParDifference<'_, T, S1, S2> { + fn clone(&self) -> Self { + ParDifference { ..*self } + } +} + +impl fmt::Debug for ParDifference<'_, T, S1, S2> +where + T: fmt::Debug + Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.set1.difference(self.set2)) + .finish() + } +} + +impl<'a, T, S1, S2> ParallelIterator for ParDifference<'a, T, S1, S2> +where + T: Hash + Eq + Sync, + S1: BuildHasher + Sync, + S2: BuildHasher + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let Self { set1, set2 } = self; + + set1.par_iter() + .filter(move |&item| !set2.contains(item)) + .drive_unindexed(consumer) + } +} + +/// A parallel iterator producing elements in the intersection of [`IndexSet`]s. +/// +/// This `struct` is created by the [`IndexSet::par_intersection`] method. +/// See its documentation for more. +pub struct ParIntersection<'a, T, S1, S2> { + set1: &'a IndexSet, + set2: &'a IndexSet, +} + +impl Clone for ParIntersection<'_, T, S1, S2> { + fn clone(&self) -> Self { + ParIntersection { ..*self } + } +} + +impl fmt::Debug for ParIntersection<'_, T, S1, S2> +where + T: fmt::Debug + Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.set1.intersection(self.set2)) + .finish() + } +} + +impl<'a, T, S1, S2> ParallelIterator for ParIntersection<'a, T, S1, S2> +where + T: Hash + Eq + Sync, + S1: BuildHasher + Sync, + S2: BuildHasher + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let Self { set1, set2 } = self; + + set1.par_iter() + .filter(move |&item| set2.contains(item)) + .drive_unindexed(consumer) + } +} + +/// A parallel iterator producing elements in the symmetric difference of [`IndexSet`]s. +/// +/// This `struct` is created by the [`IndexSet::par_symmetric_difference`] method. +/// See its documentation for more. +pub struct ParSymmetricDifference<'a, T, S1, S2> { + set1: &'a IndexSet, + set2: &'a IndexSet, +} + +impl Clone for ParSymmetricDifference<'_, T, S1, S2> { + fn clone(&self) -> Self { + ParSymmetricDifference { ..*self } + } +} + +impl fmt::Debug for ParSymmetricDifference<'_, T, S1, S2> +where + T: fmt::Debug + Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.set1.symmetric_difference(self.set2)) + .finish() + } +} + +impl<'a, T, S1, S2> ParallelIterator for ParSymmetricDifference<'a, T, S1, S2> +where + T: Hash + Eq + Sync, + S1: BuildHasher + Sync, + S2: BuildHasher + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let Self { set1, set2 } = self; + + set1.par_difference(set2) + .chain(set2.par_difference(set1)) + .drive_unindexed(consumer) + } +} + +/// A parallel iterator producing elements in the union of [`IndexSet`]s. +/// +/// This `struct` is created by the [`IndexSet::par_union`] method. +/// See its documentation for more. 
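The symmetric difference above is built by chaining the two one-way differences, so its output lists the elements unique to `self` (in `self` order) followed by the elements unique to `other` (in `other` order); `par_union` behaves analogously, as its docs state. A sketch (our own example, `rayon` feature assumed):

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn par_set_ops_sketch() {
        use indexmap::IndexSet;
        use rayon::prelude::*;

        let a: IndexSet<u32> = [2, 1, 3].into_iter().collect();
        let b: IndexSet<u32> = [3, 4, 1, 5].into_iter().collect();

        let sym: Vec<u32> = a.par_symmetric_difference(&b).copied().collect();
        assert_eq!(sym, vec![2, 4, 5]);

        let union: Vec<u32> = a.par_union(&b).copied().collect();
        assert_eq!(union, vec![2, 1, 3, 4, 5]);
    }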
+pub struct ParUnion<'a, T, S1, S2> { + set1: &'a IndexSet, + set2: &'a IndexSet, +} + +impl Clone for ParUnion<'_, T, S1, S2> { + fn clone(&self) -> Self { + ParUnion { ..*self } + } +} + +impl fmt::Debug for ParUnion<'_, T, S1, S2> +where + T: fmt::Debug + Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.set1.union(self.set2)).finish() + } +} + +impl<'a, T, S1, S2> ParallelIterator for ParUnion<'a, T, S1, S2> +where + T: Hash + Eq + Sync, + S1: BuildHasher + Sync, + S2: BuildHasher + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let Self { set1, set2 } = self; + + set1.par_iter() + .chain(set2.par_difference(set1)) + .drive_unindexed(consumer) + } +} + +/// Parallel sorting methods. +/// +/// The following methods **require crate feature `"rayon"`**. +impl IndexSet +where + T: Send, +{ + /// Sort the set’s values in parallel by their default ordering. + pub fn par_sort(&mut self) + where + T: Ord, + { + self.with_entries(|entries| { + entries.par_sort_by(|a, b| T::cmp(&a.key, &b.key)); + }); + } + + /// Sort the set’s values in place and in parallel, using the comparison function `cmp`. + pub fn par_sort_by(&mut self, cmp: F) + where + F: Fn(&T, &T) -> Ordering + Sync, + { + self.with_entries(|entries| { + entries.par_sort_by(move |a, b| cmp(&a.key, &b.key)); + }); + } + + /// Sort the values of the set in parallel and return a by-value parallel iterator of + /// the values with the result. + pub fn par_sorted_by(self, cmp: F) -> IntoParIter + where + F: Fn(&T, &T) -> Ordering + Sync, + { + let mut entries = self.into_entries(); + entries.par_sort_by(move |a, b| cmp(&a.key, &b.key)); + IntoParIter { entries } + } + + /// Sort the set's values in parallel by their default ordering. + pub fn par_sort_unstable(&mut self) + where + T: Ord, + { + self.with_entries(|entries| { + entries.par_sort_unstable_by(|a, b| T::cmp(&a.key, &b.key)); + }); + } + + /// Sort the set’s values in place and in parallel, using the comparison function `cmp`. + pub fn par_sort_unstable_by(&mut self, cmp: F) + where + F: Fn(&T, &T) -> Ordering + Sync, + { + self.with_entries(|entries| { + entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); + }); + } + + /// Sort the values of the set in parallel and return a by-value parallel iterator of + /// the values with the result. + pub fn par_sorted_unstable_by(self, cmp: F) -> IntoParIter + where + F: Fn(&T, &T) -> Ordering + Sync, + { + let mut entries = self.into_entries(); + entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); + IntoParIter { entries } + } + + /// Sort the set’s values in place and in parallel, using a key extraction function. 
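Parallel sorting for sets parallels the map methods: in place with `par_sort`/`par_sort_by`, or by value with `par_sorted_by`. A sketch (our own example, `rayon` feature assumed):

    // Illustrative sketch only; requires the crate's "rayon" feature.
    fn par_set_sort_sketch() {
        use indexmap::IndexSet;
        use rayon::prelude::*;

        let mut set: IndexSet<u32> = [3, 1, 2].into_iter().collect();

        set.par_sort();
        assert!(set.iter().copied().eq(1..=3));

        // `par_sorted_by` consumes the set and hands back the sorted values.
        let descending: Vec<u32> = set.par_sorted_by(|a, b| b.cmp(a)).collect();
        assert_eq!(descending, vec![3, 2, 1]);
    }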
+ pub fn par_sort_by_cached_key(&mut self, sort_key: F) + where + K: Ord + Send, + F: Fn(&T) -> K + Sync, + { + self.with_entries(move |entries| { + entries.par_sort_by_cached_key(move |a| sort_key(&a.key)); + }); + } +} + +impl FromParallelIterator for IndexSet +where + T: Eq + Hash + Send, + S: BuildHasher + Default + Send, +{ + fn from_par_iter(iter: I) -> Self + where + I: IntoParallelIterator, + { + let list = collect(iter); + let len = list.iter().map(Vec::len).sum(); + let mut set = Self::with_capacity_and_hasher(len, S::default()); + for vec in list { + set.extend(vec); + } + set + } +} + +impl ParallelExtend for IndexSet +where + T: Eq + Hash + Send, + S: BuildHasher + Send, +{ + fn par_extend(&mut self, iter: I) + where + I: IntoParallelIterator, + { + for vec in collect(iter) { + self.extend(vec); + } + } +} + +impl<'a, T: 'a, S> ParallelExtend<&'a T> for IndexSet +where + T: Copy + Eq + Hash + Send + Sync, + S: BuildHasher + Send, +{ + fn par_extend(&mut self, iter: I) + where + I: IntoParallelIterator, + { + for vec in collect(iter) { + self.extend(vec); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn insert_order() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + assert_eq!(set.par_iter().count(), set.len()); + assert_eq!(set.par_iter().count(), insert.len()); + insert.par_iter().zip(&set).for_each(|(a, b)| { + assert_eq!(a, b); + }); + (0..insert.len()) + .into_par_iter() + .zip(&set) + .for_each(|(i, v)| { + assert_eq!(set.get_index(i).unwrap(), v); + }); + } + + #[test] + fn partial_eq_and_eq() { + let mut set_a = IndexSet::new(); + set_a.insert(1); + set_a.insert(2); + let mut set_b = set_a.clone(); + assert!(set_a.par_eq(&set_b)); + set_b.swap_remove(&1); + assert!(!set_a.par_eq(&set_b)); + set_b.insert(3); + assert!(!set_a.par_eq(&set_b)); + + let set_c: IndexSet<_> = set_b.into_par_iter().collect(); + assert!(!set_a.par_eq(&set_c)); + assert!(!set_c.par_eq(&set_a)); + } + + #[test] + fn extend() { + let mut set = IndexSet::new(); + set.par_extend(vec![&1, &2, &3, &4]); + set.par_extend(vec![5, 6]); + assert_eq!( + set.into_par_iter().collect::>(), + vec![1, 2, 3, 4, 5, 6] + ); + } + + #[test] + fn comparisons() { + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).collect(); + + assert!(!set_a.par_is_disjoint(&set_a)); + assert!(set_a.par_is_subset(&set_a)); + assert!(set_a.par_is_superset(&set_a)); + + assert!(set_a.par_is_disjoint(&set_b)); + assert!(set_b.par_is_disjoint(&set_a)); + assert!(!set_a.par_is_subset(&set_b)); + assert!(!set_b.par_is_subset(&set_a)); + assert!(!set_a.par_is_superset(&set_b)); + assert!(!set_b.par_is_superset(&set_a)); + + assert!(!set_a.par_is_disjoint(&set_c)); + assert!(!set_c.par_is_disjoint(&set_a)); + assert!(set_a.par_is_subset(&set_c)); + assert!(!set_c.par_is_subset(&set_a)); + assert!(!set_a.par_is_superset(&set_c)); + assert!(set_c.par_is_superset(&set_a)); + + assert!(!set_c.par_is_disjoint(&set_d)); + assert!(!set_d.par_is_disjoint(&set_c)); + assert!(!set_c.par_is_subset(&set_d)); + assert!(!set_d.par_is_subset(&set_c)); + assert!(!set_c.par_is_superset(&set_d)); + assert!(!set_d.par_is_superset(&set_c)); + } + + #[test] + fn iter_comparisons() { + use std::iter::empty; + + fn check<'a, I1, I2>(iter1: I1, iter2: I2) + where + I1: ParallelIterator, + I2: Iterator, + { + let v1: Vec<_> = 
iter1.copied().collect(); + let v2: Vec<_> = iter2.collect(); + assert_eq!(v1, v2); + } + + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).rev().collect(); + + check(set_a.par_difference(&set_a), empty()); + check(set_a.par_symmetric_difference(&set_a), empty()); + check(set_a.par_intersection(&set_a), 0..3); + check(set_a.par_union(&set_a), 0..3); + + check(set_a.par_difference(&set_b), 0..3); + check(set_b.par_difference(&set_a), 3..6); + check(set_a.par_symmetric_difference(&set_b), 0..6); + check(set_b.par_symmetric_difference(&set_a), (3..6).chain(0..3)); + check(set_a.par_intersection(&set_b), empty()); + check(set_b.par_intersection(&set_a), empty()); + check(set_a.par_union(&set_b), 0..6); + check(set_b.par_union(&set_a), (3..6).chain(0..3)); + + check(set_a.par_difference(&set_c), empty()); + check(set_c.par_difference(&set_a), 3..6); + check(set_a.par_symmetric_difference(&set_c), 3..6); + check(set_c.par_symmetric_difference(&set_a), 3..6); + check(set_a.par_intersection(&set_c), 0..3); + check(set_c.par_intersection(&set_a), 0..3); + check(set_a.par_union(&set_c), 0..6); + check(set_c.par_union(&set_a), 0..6); + + check(set_c.par_difference(&set_d), 0..3); + check(set_d.par_difference(&set_c), (6..9).rev()); + check( + set_c.par_symmetric_difference(&set_d), + (0..3).chain((6..9).rev()), + ); + check( + set_d.par_symmetric_difference(&set_c), + (6..9).rev().chain(0..3), + ); + check(set_c.par_intersection(&set_d), 3..6); + check(set_d.par_intersection(&set_c), (3..6).rev()); + check(set_c.par_union(&set_d), (0..6).chain((6..9).rev())); + check(set_d.par_union(&set_c), (3..9).rev().chain(0..3)); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rustc.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rustc.rs new file mode 100644 index 000000000000..b843858b325d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/rustc.rs @@ -0,0 +1,158 @@ +//! Minimal support for `rustc-rayon`, not intended for general use. 
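For reference, a small sketch of the `FromParallelIterator` and `ParallelExtend` impls exercised by the tests above (hypothetical values; assumes the `rayon` feature):

```
use indexmap::IndexSet;
use rayon::prelude::*;

fn main() {
    // Collect a parallel iterator straight into an IndexSet.
    let set: IndexSet<i32> = (0..5).into_par_iter().collect();
    assert!(set.iter().eq(&[0, 1, 2, 3, 4]));

    // ParallelExtend deduplicates while keeping first-occurrence order.
    let mut more: IndexSet<i32> = IndexSet::new();
    more.par_extend(vec![10, 11, 10, 12]);
    assert!(more.iter().eq(&[10, 11, 12]));
}
```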
+ +use crate::vec::Vec; +use crate::{Bucket, Entries, IndexMap, IndexSet}; + +use rustc_rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; +use rustc_rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; + +mod map { + use super::*; + + impl IntoParallelIterator for IndexMap + where + K: Send, + V: Send, + { + type Item = (K, V); + type Iter = IntoParIter; + + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + entries: self.into_entries(), + } + } + } + + pub struct IntoParIter { + entries: Vec>, + } + + impl ParallelIterator for IntoParIter { + type Item = (K, V); + + parallel_iterator_methods!(Bucket::key_value); + } + + impl IndexedParallelIterator for IntoParIter { + indexed_parallel_iterator_methods!(Bucket::key_value); + } + + impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap + where + K: Sync, + V: Sync, + { + type Item = (&'a K, &'a V); + type Iter = ParIter<'a, K, V>; + + fn into_par_iter(self) -> Self::Iter { + ParIter { + entries: self.as_entries(), + } + } + } + + pub struct ParIter<'a, K, V> { + entries: &'a [Bucket], + } + + impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { + type Item = (&'a K, &'a V); + + parallel_iterator_methods!(Bucket::refs); + } + + impl IndexedParallelIterator for ParIter<'_, K, V> { + indexed_parallel_iterator_methods!(Bucket::refs); + } + + impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap + where + K: Sync + Send, + V: Send, + { + type Item = (&'a K, &'a mut V); + type Iter = ParIterMut<'a, K, V>; + + fn into_par_iter(self) -> Self::Iter { + ParIterMut { + entries: self.as_entries_mut(), + } + } + } + + pub struct ParIterMut<'a, K, V> { + entries: &'a mut [Bucket], + } + + impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + parallel_iterator_methods!(Bucket::ref_mut); + } + + impl IndexedParallelIterator for ParIterMut<'_, K, V> { + indexed_parallel_iterator_methods!(Bucket::ref_mut); + } +} + +mod set { + use super::*; + + impl IntoParallelIterator for IndexSet + where + T: Send, + { + type Item = T; + type Iter = IntoParIter; + + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + entries: self.into_entries(), + } + } + } + + pub struct IntoParIter { + entries: Vec>, + } + + impl ParallelIterator for IntoParIter { + type Item = T; + + parallel_iterator_methods!(Bucket::key); + } + + impl IndexedParallelIterator for IntoParIter { + indexed_parallel_iterator_methods!(Bucket::key); + } + + impl<'a, T, S> IntoParallelIterator for &'a IndexSet + where + T: Sync, + { + type Item = &'a T; + type Iter = ParIter<'a, T>; + + fn into_par_iter(self) -> Self::Iter { + ParIter { + entries: self.as_entries(), + } + } + } + + pub struct ParIter<'a, T> { + entries: &'a [Bucket], + } + + impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { + type Item = &'a T; + + parallel_iterator_methods!(Bucket::key_ref); + } + + impl IndexedParallelIterator for ParIter<'_, T> { + indexed_parallel_iterator_methods!(Bucket::key_ref); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/serde.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/serde.rs new file mode 100644 index 000000000000..25546d531b9d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/serde.rs @@ -0,0 +1,166 @@ +#![cfg_attr(docsrs, doc(cfg(feature = "serde")))] + +use serde::de::value::{MapDeserializer, SeqDeserializer}; +use serde::de::{ + Deserialize, Deserializer, Error, 
IntoDeserializer, MapAccess, SeqAccess, Visitor, +}; +use serde::ser::{Serialize, Serializer}; + +use core::fmt::{self, Formatter}; +use core::hash::{BuildHasher, Hash}; +use core::marker::PhantomData; +use core::{cmp, mem}; + +use crate::{Bucket, IndexMap, IndexSet}; + +/// Limit our preallocated capacity from a deserializer `size_hint()`. +/// +/// We do account for the `Bucket` overhead from its saved `hash` field, but we don't count the +/// `RawTable` allocation or the fact that its raw capacity will be rounded up to a power of two. +/// The "max" is an arbitrary choice anyway, not something that needs precise adherence. +/// +/// This is based on the internal `serde::de::size_hint::cautious(hint)` function. +pub(crate) fn cautious_capacity(hint: Option) -> usize { + const MAX_PREALLOC_BYTES: usize = 1024 * 1024; + + cmp::min( + hint.unwrap_or(0), + MAX_PREALLOC_BYTES / mem::size_of::>(), + ) +} + +impl Serialize for IndexMap +where + K: Serialize, + V: Serialize, +{ + fn serialize(&self, serializer: T) -> Result + where + T: Serializer, + { + serializer.collect_map(self) + } +} + +struct IndexMapVisitor(PhantomData<(K, V, S)>); + +impl<'de, K, V, S> Visitor<'de> for IndexMapVisitor +where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: Default + BuildHasher, +{ + type Value = IndexMap; + + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "a map") + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let capacity = cautious_capacity::(map.size_hint()); + let mut values = IndexMap::with_capacity_and_hasher(capacity, S::default()); + + while let Some((key, value)) = map.next_entry()? { + values.insert(key, value); + } + + Ok(values) + } +} + +impl<'de, K, V, S> Deserialize<'de> for IndexMap +where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: Default + BuildHasher, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_map(IndexMapVisitor(PhantomData)) + } +} + +impl<'de, K, V, S, E> IntoDeserializer<'de, E> for IndexMap +where + K: IntoDeserializer<'de, E> + Eq + Hash, + V: IntoDeserializer<'de, E>, + S: BuildHasher, + E: Error, +{ + type Deserializer = MapDeserializer<'de, ::IntoIter, E>; + + fn into_deserializer(self) -> Self::Deserializer { + MapDeserializer::new(self.into_iter()) + } +} + +impl Serialize for IndexSet +where + T: Serialize, +{ + fn serialize(&self, serializer: Se) -> Result + where + Se: Serializer, + { + serializer.collect_seq(self) + } +} + +struct IndexSetVisitor(PhantomData<(T, S)>); + +impl<'de, T, S> Visitor<'de> for IndexSetVisitor +where + T: Deserialize<'de> + Eq + Hash, + S: Default + BuildHasher, +{ + type Value = IndexSet; + + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "a set") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let capacity = cautious_capacity::(seq.size_hint()); + let mut values = IndexSet::with_capacity_and_hasher(capacity, S::default()); + + while let Some(value) = seq.next_element()? 
{ + values.insert(value); + } + + Ok(values) + } +} + +impl<'de, T, S> Deserialize<'de> for IndexSet +where + T: Deserialize<'de> + Eq + Hash, + S: Default + BuildHasher, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_seq(IndexSetVisitor(PhantomData)) + } +} + +impl<'de, T, S, E> IntoDeserializer<'de, E> for IndexSet +where + T: IntoDeserializer<'de, E> + Eq + Hash, + S: BuildHasher, + E: Error, +{ + type Deserializer = SeqDeserializer<::IntoIter, E>; + + fn into_deserializer(self) -> Self::Deserializer { + SeqDeserializer::new(self.into_iter()) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set.rs new file mode 100644 index 000000000000..5a91db95aec7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set.rs @@ -0,0 +1,1290 @@ +//! A hash set implemented using [`IndexMap`] + +mod iter; +mod mutable; +mod slice; + +#[cfg(test)] +mod tests; + +pub use self::iter::{ + Difference, Drain, Intersection, IntoIter, Iter, Splice, SymmetricDifference, Union, +}; +pub use self::mutable::MutableValues; +pub use self::slice::Slice; + +#[cfg(feature = "rayon")] +pub use crate::rayon::set as rayon; +use crate::TryReserveError; + +#[cfg(feature = "std")] +use std::collections::hash_map::RandomState; + +use crate::util::try_simplify_range; +use alloc::boxed::Box; +use alloc::vec::Vec; +use core::cmp::Ordering; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::ops::{BitAnd, BitOr, BitXor, Index, RangeBounds, Sub}; + +use super::{Entries, Equivalent, IndexMap}; + +type Bucket = super::Bucket; + +/// A hash set where the iteration order of the values is independent of their +/// hash values. +/// +/// The interface is closely compatible with the standard +/// [`HashSet`][std::collections::HashSet], +/// but also has additional features. +/// +/// # Order +/// +/// The values have a consistent order that is determined by the sequence of +/// insertion and removal calls on the set. The order does not depend on the +/// values or the hash function at all. Note that insertion order and value +/// are not affected if a re-insertion is attempted once an element is +/// already present. +/// +/// All iterators traverse the set *in order*. Set operation iterators like +/// [`IndexSet::union`] produce a concatenated order, as do their matching "bitwise" +/// operators. See their documentation for specifics. +/// +/// The insertion order is preserved, with **notable exceptions** like the +/// [`.remove()`][Self::remove] or [`.swap_remove()`][Self::swap_remove] methods. +/// Methods such as [`.sort_by()`][Self::sort_by] of +/// course result in a new order, depending on the sorting order. +/// +/// # Indices +/// +/// The values are indexed in a compact range without holes in the range +/// `0..self.len()`. For example, the method `.get_full` looks up the index for +/// a value, and the method `.get_index` looks up the value by index. +/// +/// # Complexity +/// +/// Internally, `IndexSet` just holds an [`IndexMap`](IndexMap). Thus the complexity +/// of the two are the same for most methods. +/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexSet; +/// +/// // Collects which letters appear in a sentence. 
+/// let letters: IndexSet<_> = "a short treatise on fungi".chars().collect(); +/// +/// assert!(letters.contains(&'s')); +/// assert!(letters.contains(&'t')); +/// assert!(letters.contains(&'u')); +/// assert!(!letters.contains(&'y')); +/// ``` +#[cfg(feature = "std")] +pub struct IndexSet { + pub(crate) map: IndexMap, +} +#[cfg(not(feature = "std"))] +pub struct IndexSet { + pub(crate) map: IndexMap, +} + +impl Clone for IndexSet +where + T: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + IndexSet { + map: self.map.clone(), + } + } + + fn clone_from(&mut self, other: &Self) { + self.map.clone_from(&other.map); + } +} + +impl Entries for IndexSet { + type Entry = Bucket; + + #[inline] + fn into_entries(self) -> Vec { + self.map.into_entries() + } + + #[inline] + fn as_entries(&self) -> &[Self::Entry] { + self.map.as_entries() + } + + #[inline] + fn as_entries_mut(&mut self) -> &mut [Self::Entry] { + self.map.as_entries_mut() + } + + fn with_entries(&mut self, f: F) + where + F: FnOnce(&mut [Self::Entry]), + { + self.map.with_entries(f); + } +} + +impl fmt::Debug for IndexSet +where + T: fmt::Debug, +{ + #[cfg(not(feature = "test_debug"))] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_set().entries(self.iter()).finish() + } + + #[cfg(feature = "test_debug")] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Let the inner `IndexMap` print all of its details + f.debug_struct("IndexSet").field("map", &self.map).finish() + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl IndexSet { + /// Create a new set. (Does not allocate.) + pub fn new() -> Self { + IndexSet { + map: IndexMap::new(), + } + } + + /// Create a new set with capacity for `n` elements. + /// (Does not allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + pub fn with_capacity(n: usize) -> Self { + IndexSet { + map: IndexMap::with_capacity(n), + } + } +} + +impl IndexSet { + /// Create a new set with capacity for `n` elements. + /// (Does not allocate if `n` is zero.) + /// + /// Computes in **O(n)** time. + pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self { + IndexSet { + map: IndexMap::with_capacity_and_hasher(n, hash_builder), + } + } + + /// Create a new set with `hash_builder`. + /// + /// This function is `const`, so it + /// can be called in `static` contexts. + pub const fn with_hasher(hash_builder: S) -> Self { + IndexSet { + map: IndexMap::with_hasher(hash_builder), + } + } + + /// Return the number of elements the set can hold without reallocating. + /// + /// This number is a lower bound; the set might be able to hold more, + /// but is guaranteed to be able to hold at least this many. + /// + /// Computes in **O(1)** time. + pub fn capacity(&self) -> usize { + self.map.capacity() + } + + /// Return a reference to the set's `BuildHasher`. + pub fn hasher(&self) -> &S { + self.map.hasher() + } + + /// Return the number of elements in the set. + /// + /// Computes in **O(1)** time. + pub fn len(&self) -> usize { + self.map.len() + } + + /// Returns true if the set contains no elements. + /// + /// Computes in **O(1)** time. + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Return an iterator over the values of the set, in their order + pub fn iter(&self) -> Iter<'_, T> { + Iter::new(self.as_entries()) + } + + /// Remove all elements in the set, while preserving its capacity. + /// + /// Computes in **O(n)** time. 
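A minimal usage sketch of the construction and ordering behaviour described above (hypothetical values):

```
use indexmap::IndexSet;

fn main() {
    let mut days = IndexSet::new();
    days.insert("mon");
    days.insert("wed");
    days.insert("fri");
    days.insert("wed"); // already present: no move, no duplicate

    // Iteration follows first-insertion order, not hash order.
    assert!(days.iter().eq(&["mon", "wed", "fri"]));
    assert_eq!(days.get_index_of("fri"), Some(2));
    assert_eq!(days.len(), 3);
}
```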
+ pub fn clear(&mut self) { + self.map.clear(); + } + + /// Shortens the set, keeping the first `len` elements and dropping the rest. + /// + /// If `len` is greater than the set's current length, this has no effect. + pub fn truncate(&mut self, len: usize) { + self.map.truncate(len); + } + + /// Clears the `IndexSet` in the given index range, returning those values + /// as a drain iterator. + /// + /// The range may be any type that implements [`RangeBounds`], + /// including all of the `std::ops::Range*` types, or even a tuple pair of + /// `Bound` start and end values. To drain the set entirely, use `RangeFull` + /// like `set.drain(..)`. + /// + /// This shifts down all entries following the drained range to fill the + /// gap, and keeps the allocated memory for reuse. + /// + /// ***Panics*** if the starting point is greater than the end point or if + /// the end point is greater than the length of the set. + pub fn drain(&mut self, range: R) -> Drain<'_, T> + where + R: RangeBounds, + { + Drain::new(self.map.core.drain(range)) + } + + /// Splits the collection into two at the given index. + /// + /// Returns a newly allocated set containing the elements in the range + /// `[at, len)`. After the call, the original set will be left containing + /// the elements `[0, at)` with its previous capacity unchanged. + /// + /// ***Panics*** if `at > len`. + pub fn split_off(&mut self, at: usize) -> Self + where + S: Clone, + { + Self { + map: self.map.split_off(at), + } + } + + /// Reserve capacity for `additional` more values. + /// + /// Computes in **O(n)** time. + pub fn reserve(&mut self, additional: usize) { + self.map.reserve(additional); + } + + /// Reserve capacity for `additional` more values, without over-allocating. + /// + /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid + /// frequent re-allocations. However, the underlying data structures may still have internal + /// capacity requirements, and the allocator itself may give more space than requested, so this + /// cannot be relied upon to be precisely minimal. + /// + /// Computes in **O(n)** time. + pub fn reserve_exact(&mut self, additional: usize) { + self.map.reserve_exact(additional); + } + + /// Try to reserve capacity for `additional` more values. + /// + /// Computes in **O(n)** time. + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.map.try_reserve(additional) + } + + /// Try to reserve capacity for `additional` more values, without over-allocating. + /// + /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid + /// frequent re-allocations. However, the underlying data structures may still have internal + /// capacity requirements, and the allocator itself may give more space than requested, so this + /// cannot be relied upon to be precisely minimal. + /// + /// Computes in **O(n)** time. + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.map.try_reserve_exact(additional) + } + + /// Shrink the capacity of the set as much as possible. + /// + /// Computes in **O(n)** time. + pub fn shrink_to_fit(&mut self) { + self.map.shrink_to_fit(); + } + + /// Shrink the capacity of the set with a lower limit. + /// + /// Computes in **O(n)** time. + pub fn shrink_to(&mut self, min_capacity: usize) { + self.map.shrink_to(min_capacity); + } +} + +impl IndexSet +where + T: Hash + Eq, + S: BuildHasher, +{ + /// Insert the value into the set. 
+ /// + /// If an equivalent item already exists in the set, it returns + /// `false` leaving the original value in the set and without + /// altering its insertion order. Otherwise, it inserts the new + /// item and returns `true`. + /// + /// Computes in **O(1)** time (amortized average). + pub fn insert(&mut self, value: T) -> bool { + self.map.insert(value, ()).is_none() + } + + /// Insert the value into the set, and get its index. + /// + /// If an equivalent item already exists in the set, it returns + /// the index of the existing item and `false`, leaving the + /// original value in the set and without altering its insertion + /// order. Otherwise, it inserts the new item and returns the index + /// of the inserted item and `true`. + /// + /// Computes in **O(1)** time (amortized average). + pub fn insert_full(&mut self, value: T) -> (usize, bool) { + let (index, existing) = self.map.insert_full(value, ()); + (index, existing.is_none()) + } + + /// Insert the value into the set at its ordered position among sorted values. + /// + /// This is equivalent to finding the position with + /// [`binary_search`][Self::binary_search], and if needed calling + /// [`insert_before`][Self::insert_before] for a new value. + /// + /// If the sorted item is found in the set, it returns the index of that + /// existing item and `false`, without any change. Otherwise, it inserts the + /// new item and returns its sorted index and `true`. + /// + /// If the existing items are **not** already sorted, then the insertion + /// index is unspecified (like [`slice::binary_search`]), but the value + /// is moved to or inserted at that position regardless. + /// + /// Computes in **O(n)** time (average). Instead of repeating calls to + /// `insert_sorted`, it may be faster to call batched [`insert`][Self::insert] + /// or [`extend`][Self::extend] and only call [`sort`][Self::sort] or + /// [`sort_unstable`][Self::sort_unstable] once. + pub fn insert_sorted(&mut self, value: T) -> (usize, bool) + where + T: Ord, + { + let (index, existing) = self.map.insert_sorted(value, ()); + (index, existing.is_none()) + } + + /// Insert the value into the set before the value at the given index, or at the end. + /// + /// If an equivalent item already exists in the set, it returns `false` leaving the + /// original value in the set, but moved to the new position. The returned index + /// will either be the given index or one less, depending on how the value moved. + /// (See [`shift_insert`](Self::shift_insert) for different behavior here.) + /// + /// Otherwise, it inserts the new value exactly at the given index and returns `true`. + /// + /// ***Panics*** if `index` is out of bounds. + /// Valid indices are `0..=set.len()` (inclusive). + /// + /// Computes in **O(n)** time (average). + /// + /// # Examples + /// + /// ``` + /// use indexmap::IndexSet; + /// let mut set: IndexSet = ('a'..='z').collect(); + /// + /// // The new value '*' goes exactly at the given index. + /// assert_eq!(set.get_index_of(&'*'), None); + /// assert_eq!(set.insert_before(10, '*'), (10, true)); + /// assert_eq!(set.get_index_of(&'*'), Some(10)); + /// + /// // Moving the value 'a' up will shift others down, so this moves *before* 10 to index 9. + /// assert_eq!(set.insert_before(10, 'a'), (9, false)); + /// assert_eq!(set.get_index_of(&'a'), Some(9)); + /// assert_eq!(set.get_index_of(&'*'), Some(10)); + /// + /// // Moving the value 'z' down will shift others up, so this moves to exactly 10. 
+ /// assert_eq!(set.insert_before(10, 'z'), (10, false)); + /// assert_eq!(set.get_index_of(&'z'), Some(10)); + /// assert_eq!(set.get_index_of(&'*'), Some(11)); + /// + /// // Moving or inserting before the endpoint is also valid. + /// assert_eq!(set.len(), 27); + /// assert_eq!(set.insert_before(set.len(), '*'), (26, false)); + /// assert_eq!(set.get_index_of(&'*'), Some(26)); + /// assert_eq!(set.insert_before(set.len(), '+'), (27, true)); + /// assert_eq!(set.get_index_of(&'+'), Some(27)); + /// assert_eq!(set.len(), 28); + /// ``` + pub fn insert_before(&mut self, index: usize, value: T) -> (usize, bool) { + let (index, existing) = self.map.insert_before(index, value, ()); + (index, existing.is_none()) + } + + /// Insert the value into the set at the given index. + /// + /// If an equivalent item already exists in the set, it returns `false` leaving + /// the original value in the set, but moved to the given index. + /// Note that existing values **cannot** be moved to `index == set.len()`! + /// (See [`insert_before`](Self::insert_before) for different behavior here.) + /// + /// Otherwise, it inserts the new value at the given index and returns `true`. + /// + /// ***Panics*** if `index` is out of bounds. + /// Valid indices are `0..set.len()` (exclusive) when moving an existing value, or + /// `0..=set.len()` (inclusive) when inserting a new value. + /// + /// Computes in **O(n)** time (average). + /// + /// # Examples + /// + /// ``` + /// use indexmap::IndexSet; + /// let mut set: IndexSet = ('a'..='z').collect(); + /// + /// // The new value '*' goes exactly at the given index. + /// assert_eq!(set.get_index_of(&'*'), None); + /// assert_eq!(set.shift_insert(10, '*'), true); + /// assert_eq!(set.get_index_of(&'*'), Some(10)); + /// + /// // Moving the value 'a' up to 10 will shift others down, including the '*' that was at 10. + /// assert_eq!(set.shift_insert(10, 'a'), false); + /// assert_eq!(set.get_index_of(&'a'), Some(10)); + /// assert_eq!(set.get_index_of(&'*'), Some(9)); + /// + /// // Moving the value 'z' down to 9 will shift others up, including the '*' that was at 9. + /// assert_eq!(set.shift_insert(9, 'z'), false); + /// assert_eq!(set.get_index_of(&'z'), Some(9)); + /// assert_eq!(set.get_index_of(&'*'), Some(10)); + /// + /// // Existing values can move to len-1 at most, but new values can insert at the endpoint. + /// assert_eq!(set.len(), 27); + /// assert_eq!(set.shift_insert(set.len() - 1, '*'), false); + /// assert_eq!(set.get_index_of(&'*'), Some(26)); + /// assert_eq!(set.shift_insert(set.len(), '+'), true); + /// assert_eq!(set.get_index_of(&'+'), Some(27)); + /// assert_eq!(set.len(), 28); + /// ``` + /// + /// ```should_panic + /// use indexmap::IndexSet; + /// let mut set: IndexSet = ('a'..='z').collect(); + /// + /// // This is an invalid index for moving an existing value! + /// set.shift_insert(set.len(), 'a'); + /// ``` + pub fn shift_insert(&mut self, index: usize, value: T) -> bool { + self.map.shift_insert(index, value, ()).is_none() + } + + /// Adds a value to the set, replacing the existing value, if any, that is + /// equal to the given one, without altering its insertion order. Returns + /// the replaced value. + /// + /// Computes in **O(1)** time (average). + pub fn replace(&mut self, value: T) -> Option { + self.replace_full(value).1 + } + + /// Adds a value to the set, replacing the existing value, if any, that is + /// equal to the given one, without altering its insertion order. 
Returns + /// the index of the item and its replaced value. + /// + /// Computes in **O(1)** time (average). + pub fn replace_full(&mut self, value: T) -> (usize, Option) { + let hash = self.map.hash(&value); + match self.map.core.replace_full(hash, value, ()) { + (i, Some((replaced, ()))) => (i, Some(replaced)), + (i, None) => (i, None), + } + } + + /// Return an iterator over the values that are in `self` but not `other`. + /// + /// Values are produced in the same order that they appear in `self`. + pub fn difference<'a, S2>(&'a self, other: &'a IndexSet) -> Difference<'a, T, S2> + where + S2: BuildHasher, + { + Difference::new(self, other) + } + + /// Return an iterator over the values that are in `self` or `other`, + /// but not in both. + /// + /// Values from `self` are produced in their original order, followed by + /// values from `other` in their original order. + pub fn symmetric_difference<'a, S2>( + &'a self, + other: &'a IndexSet, + ) -> SymmetricDifference<'a, T, S, S2> + where + S2: BuildHasher, + { + SymmetricDifference::new(self, other) + } + + /// Return an iterator over the values that are in both `self` and `other`. + /// + /// Values are produced in the same order that they appear in `self`. + pub fn intersection<'a, S2>(&'a self, other: &'a IndexSet) -> Intersection<'a, T, S2> + where + S2: BuildHasher, + { + Intersection::new(self, other) + } + + /// Return an iterator over all values that are in `self` or `other`. + /// + /// Values from `self` are produced in their original order, followed by + /// values that are unique to `other` in their original order. + pub fn union<'a, S2>(&'a self, other: &'a IndexSet) -> Union<'a, T, S> + where + S2: BuildHasher, + { + Union::new(self, other) + } + + /// Creates a splicing iterator that replaces the specified range in the set + /// with the given `replace_with` iterator and yields the removed items. + /// `replace_with` does not need to be the same length as `range`. + /// + /// The `range` is removed even if the iterator is not consumed until the + /// end. It is unspecified how many elements are removed from the set if the + /// `Splice` value is leaked. + /// + /// The input iterator `replace_with` is only consumed when the `Splice` + /// value is dropped. If a value from the iterator matches an existing entry + /// in the set (outside of `range`), then the original will be unchanged. + /// Otherwise, the new value will be inserted in the replaced `range`. + /// + /// ***Panics*** if the starting point is greater than the end point or if + /// the end point is greater than the length of the set. + /// + /// # Examples + /// + /// ``` + /// use indexmap::IndexSet; + /// + /// let mut set = IndexSet::from([0, 1, 2, 3, 4]); + /// let new = [5, 4, 3, 2, 1]; + /// let removed: Vec<_> = set.splice(2..4, new).collect(); + /// + /// // 1 and 4 kept their positions, while 5, 3, and 2 were newly inserted. + /// assert!(set.into_iter().eq([0, 1, 5, 3, 2, 4])); + /// assert_eq!(removed, &[2, 3]); + /// ``` + pub fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, T, S> + where + R: RangeBounds, + I: IntoIterator, + { + Splice::new(self, range, replace_with.into_iter()) + } + + /// Moves all values from `other` into `self`, leaving `other` empty. + /// + /// This is equivalent to calling [`insert`][Self::insert] for each value + /// from `other` in order, which means that values that already exist + /// in `self` are unchanged in their current position. 
+ /// + /// See also [`union`][Self::union] to iterate the combined values by + /// reference, without modifying `self` or `other`. + /// + /// # Examples + /// + /// ``` + /// use indexmap::IndexSet; + /// + /// let mut a = IndexSet::from([3, 2, 1]); + /// let mut b = IndexSet::from([3, 4, 5]); + /// let old_capacity = b.capacity(); + /// + /// a.append(&mut b); + /// + /// assert_eq!(a.len(), 5); + /// assert_eq!(b.len(), 0); + /// assert_eq!(b.capacity(), old_capacity); + /// + /// assert!(a.iter().eq(&[3, 2, 1, 4, 5])); + /// ``` + pub fn append(&mut self, other: &mut IndexSet) { + self.map.append(&mut other.map); + } +} + +impl IndexSet +where + S: BuildHasher, +{ + /// Return `true` if an equivalent to `value` exists in the set. + /// + /// Computes in **O(1)** time (average). + pub fn contains(&self, value: &Q) -> bool + where + Q: ?Sized + Hash + Equivalent, + { + self.map.contains_key(value) + } + + /// Return a reference to the value stored in the set, if it is present, + /// else `None`. + /// + /// Computes in **O(1)** time (average). + pub fn get(&self, value: &Q) -> Option<&T> + where + Q: ?Sized + Hash + Equivalent, + { + self.map.get_key_value(value).map(|(x, &())| x) + } + + /// Return item index and value + pub fn get_full(&self, value: &Q) -> Option<(usize, &T)> + where + Q: ?Sized + Hash + Equivalent, + { + self.map.get_full(value).map(|(i, x, &())| (i, x)) + } + + /// Return item index, if it exists in the set + /// + /// Computes in **O(1)** time (average). + pub fn get_index_of(&self, value: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + self.map.get_index_of(value) + } + + /// Remove the value from the set, and return `true` if it was present. + /// + /// **NOTE:** This is equivalent to [`.swap_remove(value)`][Self::swap_remove], replacing this + /// value's position with the last element, and it is deprecated in favor of calling that + /// explicitly. If you need to preserve the relative order of the values in the set, use + /// [`.shift_remove(value)`][Self::shift_remove] instead. + #[deprecated(note = "`remove` disrupts the set order -- \ + use `swap_remove` or `shift_remove` for explicit behavior.")] + pub fn remove(&mut self, value: &Q) -> bool + where + Q: ?Sized + Hash + Equivalent, + { + self.swap_remove(value) + } + + /// Remove the value from the set, and return `true` if it was present. + /// + /// Like [`Vec::swap_remove`], the value is removed by swapping it with the + /// last element of the set and popping it off. **This perturbs + /// the position of what used to be the last element!** + /// + /// Return `false` if `value` was not in the set. + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove(&mut self, value: &Q) -> bool + where + Q: ?Sized + Hash + Equivalent, + { + self.map.swap_remove(value).is_some() + } + + /// Remove the value from the set, and return `true` if it was present. + /// + /// Like [`Vec::remove`], the value is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `false` if `value` was not in the set. + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove(&mut self, value: &Q) -> bool + where + Q: ?Sized + Hash + Equivalent, + { + self.map.shift_remove(value).is_some() + } + + /// Removes and returns the value in the set, if any, that is equal to the + /// given one. 
+ /// + /// **NOTE:** This is equivalent to [`.swap_take(value)`][Self::swap_take], replacing this + /// value's position with the last element, and it is deprecated in favor of calling that + /// explicitly. If you need to preserve the relative order of the values in the set, use + /// [`.shift_take(value)`][Self::shift_take] instead. + #[deprecated(note = "`take` disrupts the set order -- \ + use `swap_take` or `shift_take` for explicit behavior.")] + pub fn take(&mut self, value: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + self.swap_take(value) + } + + /// Removes and returns the value in the set, if any, that is equal to the + /// given one. + /// + /// Like [`Vec::swap_remove`], the value is removed by swapping it with the + /// last element of the set and popping it off. **This perturbs + /// the position of what used to be the last element!** + /// + /// Return `None` if `value` was not in the set. + /// + /// Computes in **O(1)** time (average). + pub fn swap_take(&mut self, value: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + self.map.swap_remove_entry(value).map(|(x, ())| x) + } + + /// Removes and returns the value in the set, if any, that is equal to the + /// given one. + /// + /// Like [`Vec::remove`], the value is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `None` if `value` was not in the set. + /// + /// Computes in **O(n)** time (average). + pub fn shift_take(&mut self, value: &Q) -> Option + where + Q: ?Sized + Hash + Equivalent, + { + self.map.shift_remove_entry(value).map(|(x, ())| x) + } + + /// Remove the value from the set return it and the index it had. + /// + /// Like [`Vec::swap_remove`], the value is removed by swapping it with the + /// last element of the set and popping it off. **This perturbs + /// the position of what used to be the last element!** + /// + /// Return `None` if `value` was not in the set. + pub fn swap_remove_full(&mut self, value: &Q) -> Option<(usize, T)> + where + Q: ?Sized + Hash + Equivalent, + { + self.map.swap_remove_full(value).map(|(i, x, ())| (i, x)) + } + + /// Remove the value from the set return it and the index it had. + /// + /// Like [`Vec::remove`], the value is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Return `None` if `value` was not in the set. + pub fn shift_remove_full(&mut self, value: &Q) -> Option<(usize, T)> + where + Q: ?Sized + Hash + Equivalent, + { + self.map.shift_remove_full(value).map(|(i, x, ())| (i, x)) + } +} + +impl IndexSet { + /// Remove the last value + /// + /// This preserves the order of the remaining elements. + /// + /// Computes in **O(1)** time (average). + #[doc(alias = "pop_last")] // like `BTreeSet` + pub fn pop(&mut self) -> Option { + self.map.pop().map(|(x, ())| x) + } + + /// Scan through each value in the set and keep those where the + /// closure `keep` returns `true`. + /// + /// The elements are visited in order, and remaining elements keep their + /// order. + /// + /// Computes in **O(n)** time (average). + pub fn retain(&mut self, mut keep: F) + where + F: FnMut(&T) -> bool, + { + self.map.retain(move |x, &mut ()| keep(x)) + } + + /// Sort the set’s values by their default ordering. 
+ /// + /// This is a stable sort -- but equivalent values should not normally coexist in + /// a set at all, so [`sort_unstable`][Self::sort_unstable] is preferred + /// because it is generally faster and doesn't allocate auxiliary memory. + /// + /// See [`sort_by`](Self::sort_by) for details. + pub fn sort(&mut self) + where + T: Ord, + { + self.map.sort_keys() + } + + /// Sort the set’s values in place using the comparison function `cmp`. + /// + /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable. + pub fn sort_by(&mut self, mut cmp: F) + where + F: FnMut(&T, &T) -> Ordering, + { + self.map.sort_by(move |a, _, b, _| cmp(a, b)); + } + + /// Sort the values of the set and return a by-value iterator of + /// the values with the result. + /// + /// The sort is stable. + pub fn sorted_by(self, mut cmp: F) -> IntoIter + where + F: FnMut(&T, &T) -> Ordering, + { + let mut entries = self.into_entries(); + entries.sort_by(move |a, b| cmp(&a.key, &b.key)); + IntoIter::new(entries) + } + + /// Sort the set's values by their default ordering. + /// + /// See [`sort_unstable_by`](Self::sort_unstable_by) for details. + pub fn sort_unstable(&mut self) + where + T: Ord, + { + self.map.sort_unstable_keys() + } + + /// Sort the set's values in place using the comparison function `cmp`. + /// + /// Computes in **O(n log n)** time. The sort is unstable. + pub fn sort_unstable_by(&mut self, mut cmp: F) + where + F: FnMut(&T, &T) -> Ordering, + { + self.map.sort_unstable_by(move |a, _, b, _| cmp(a, b)) + } + + /// Sort the values of the set and return a by-value iterator of + /// the values with the result. + pub fn sorted_unstable_by(self, mut cmp: F) -> IntoIter + where + F: FnMut(&T, &T) -> Ordering, + { + let mut entries = self.into_entries(); + entries.sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); + IntoIter::new(entries) + } + + /// Sort the set’s values in place using a key extraction function. + /// + /// During sorting, the function is called at most once per entry, by using temporary storage + /// to remember the results of its evaluation. The order of calls to the function is + /// unspecified and may change between versions of `indexmap` or the standard library. + /// + /// Computes in **O(m n + n log n + c)** time () and **O(n)** space, where the function is + /// **O(m)**, *n* is the length of the map, and *c* the capacity. The sort is stable. + pub fn sort_by_cached_key(&mut self, mut sort_key: F) + where + K: Ord, + F: FnMut(&T) -> K, + { + self.with_entries(move |entries| { + entries.sort_by_cached_key(move |a| sort_key(&a.key)); + }); + } + + /// Search over a sorted set for a value. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search`] for more details. + /// + /// Computes in **O(log(n))** time, which is notably less scalable than looking the value up + /// using [`get_index_of`][IndexSet::get_index_of], but this can also position missing values. + pub fn binary_search(&self, x: &T) -> Result + where + T: Ord, + { + self.as_slice().binary_search(x) + } + + /// Search over a sorted set with a comparator function. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search_by`] for more details. + /// + /// Computes in **O(log(n))** time. 
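A short sketch of the sort and binary-search methods above (hypothetical values):

```
use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<u32> = [30, 10, 20, 40].into_iter().collect();

    // Unstable sort is fine for a set: equal values cannot coexist anyway.
    set.sort_unstable();
    assert!(set.iter().eq(&[10, 20, 30, 40]));

    // Once sorted, binary search can locate values or insertion points.
    assert_eq!(set.binary_search(&20), Ok(1));
    assert_eq!(set.binary_search(&25), Err(2));
    assert_eq!(set.partition_point(|&x| x < 35), 3);
}
```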
+    #[inline]
+    pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result<usize, usize>
+    where
+        F: FnMut(&'a T) -> Ordering,
+    {
+        self.as_slice().binary_search_by(f)
+    }
+
+    /// Search over a sorted set with an extraction function.
+    ///
+    /// Returns the position where that value is present, or the position where it can be inserted
+    /// to maintain the sort. See [`slice::binary_search_by_key`] for more details.
+    ///
+    /// Computes in **O(log(n))** time.
+    #[inline]
+    pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result<usize, usize>
+    where
+        F: FnMut(&'a T) -> B,
+        B: Ord,
+    {
+        self.as_slice().binary_search_by_key(b, f)
+    }
+
+    /// Returns the index of the partition point of a sorted set according to the given predicate
+    /// (the index of the first element of the second partition).
+    ///
+    /// See [`slice::partition_point`] for more details.
+    ///
+    /// Computes in **O(log(n))** time.
+    #[must_use]
+    pub fn partition_point<P>
(&self, pred: P) -> usize + where + P: FnMut(&T) -> bool, + { + self.as_slice().partition_point(pred) + } + + /// Reverses the order of the set’s values in place. + /// + /// Computes in **O(n)** time and **O(1)** space. + pub fn reverse(&mut self) { + self.map.reverse() + } + + /// Returns a slice of all the values in the set. + /// + /// Computes in **O(1)** time. + pub fn as_slice(&self) -> &Slice { + Slice::from_slice(self.as_entries()) + } + + /// Converts into a boxed slice of all the values in the set. + /// + /// Note that this will drop the inner hash table and any excess capacity. + pub fn into_boxed_slice(self) -> Box> { + Slice::from_boxed(self.into_entries().into_boxed_slice()) + } + + /// Get a value by index + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + pub fn get_index(&self, index: usize) -> Option<&T> { + self.as_entries().get(index).map(Bucket::key_ref) + } + + /// Returns a slice of values in the given range of indices. + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + pub fn get_range>(&self, range: R) -> Option<&Slice> { + let entries = self.as_entries(); + let range = try_simplify_range(range, entries.len())?; + entries.get(range).map(Slice::from_slice) + } + + /// Get the first value + /// + /// Computes in **O(1)** time. + pub fn first(&self) -> Option<&T> { + self.as_entries().first().map(Bucket::key_ref) + } + + /// Get the last value + /// + /// Computes in **O(1)** time. + pub fn last(&self) -> Option<&T> { + self.as_entries().last().map(Bucket::key_ref) + } + + /// Remove the value by index + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Like [`Vec::swap_remove`], the value is removed by swapping it with the + /// last element of the set and popping it off. **This perturbs + /// the position of what used to be the last element!** + /// + /// Computes in **O(1)** time (average). + pub fn swap_remove_index(&mut self, index: usize) -> Option { + self.map.swap_remove_index(index).map(|(x, ())| x) + } + + /// Remove the value by index + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Like [`Vec::remove`], the value is removed by shifting all of the + /// elements that follow it, preserving their relative order. + /// **This perturbs the index of all of those elements!** + /// + /// Computes in **O(n)** time (average). + pub fn shift_remove_index(&mut self, index: usize) -> Option { + self.map.shift_remove_index(index).map(|(x, ())| x) + } + + /// Moves the position of a value from one index to another + /// by shifting all other values in-between. + /// + /// * If `from < to`, the other values will shift down while the targeted value moves up. + /// * If `from > to`, the other values will shift up while the targeted value moves down. + /// + /// ***Panics*** if `from` or `to` are out of bounds. + /// + /// Computes in **O(n)** time (average). + pub fn move_index(&mut self, from: usize, to: usize) { + self.map.move_index(from, to) + } + + /// Swaps the position of two values in the set. + /// + /// ***Panics*** if `a` or `b` are out of bounds. + /// + /// Computes in **O(1)** time (average). + pub fn swap_indices(&mut self, a: usize, b: usize) { + self.map.swap_indices(a, b) + } +} + +/// Access [`IndexSet`] values at indexed positions. 
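A small sketch of the index-based accessors above, contrasting `swap_remove_index` with `shift_remove_index` (hypothetical values):

```
use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<char> = ['a', 'b', 'c', 'd'].into_iter().collect();

    assert_eq!(set.get_index(2), Some(&'c'));
    assert_eq!(set.first(), Some(&'a'));

    // swap_remove_index is O(1) but moves the last element into the hole...
    set.swap_remove_index(0);
    assert!(set.iter().eq(&['d', 'b', 'c']));

    // ...while shift_remove_index keeps the relative order at O(n) cost.
    set.shift_remove_index(0);
    assert!(set.iter().eq(&['b', 'c']));
}
```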
+/// +/// # Examples +/// +/// ``` +/// use indexmap::IndexSet; +/// +/// let mut set = IndexSet::new(); +/// for word in "Lorem ipsum dolor sit amet".split_whitespace() { +/// set.insert(word.to_string()); +/// } +/// assert_eq!(set[0], "Lorem"); +/// assert_eq!(set[1], "ipsum"); +/// set.reverse(); +/// assert_eq!(set[0], "amet"); +/// assert_eq!(set[1], "sit"); +/// set.sort(); +/// assert_eq!(set[0], "Lorem"); +/// assert_eq!(set[1], "amet"); +/// ``` +/// +/// ```should_panic +/// use indexmap::IndexSet; +/// +/// let mut set = IndexSet::new(); +/// set.insert("foo"); +/// println!("{:?}", set[10]); // panics! +/// ``` +impl Index for IndexSet { + type Output = T; + + /// Returns a reference to the value at the supplied `index`. + /// + /// ***Panics*** if `index` is out of bounds. + fn index(&self, index: usize) -> &T { + self.get_index(index) + .expect("IndexSet: index out of bounds") + } +} + +impl FromIterator for IndexSet +where + T: Hash + Eq, + S: BuildHasher + Default, +{ + fn from_iter>(iterable: I) -> Self { + let iter = iterable.into_iter().map(|x| (x, ())); + IndexSet { + map: IndexMap::from_iter(iter), + } + } +} + +#[cfg(feature = "std")] +#[cfg_attr(docsrs, doc(cfg(feature = "std")))] +impl From<[T; N]> for IndexSet +where + T: Eq + Hash, +{ + /// # Examples + /// + /// ``` + /// use indexmap::IndexSet; + /// + /// let set1 = IndexSet::from([1, 2, 3, 4]); + /// let set2: IndexSet<_> = [1, 2, 3, 4].into(); + /// assert_eq!(set1, set2); + /// ``` + fn from(arr: [T; N]) -> Self { + Self::from_iter(arr) + } +} + +impl Extend for IndexSet +where + T: Hash + Eq, + S: BuildHasher, +{ + fn extend>(&mut self, iterable: I) { + let iter = iterable.into_iter().map(|x| (x, ())); + self.map.extend(iter); + } +} + +impl<'a, T, S> Extend<&'a T> for IndexSet +where + T: Hash + Eq + Copy + 'a, + S: BuildHasher, +{ + fn extend>(&mut self, iterable: I) { + let iter = iterable.into_iter().copied(); + self.extend(iter); + } +} + +impl Default for IndexSet +where + S: Default, +{ + /// Return an empty [`IndexSet`] + fn default() -> Self { + IndexSet { + map: IndexMap::default(), + } + } +} + +impl PartialEq> for IndexSet +where + T: Hash + Eq, + S1: BuildHasher, + S2: BuildHasher, +{ + fn eq(&self, other: &IndexSet) -> bool { + self.len() == other.len() && self.is_subset(other) + } +} + +impl Eq for IndexSet +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl IndexSet +where + T: Eq + Hash, + S: BuildHasher, +{ + /// Returns `true` if `self` has no elements in common with `other`. + pub fn is_disjoint(&self, other: &IndexSet) -> bool + where + S2: BuildHasher, + { + if self.len() <= other.len() { + self.iter().all(move |value| !other.contains(value)) + } else { + other.iter().all(move |value| !self.contains(value)) + } + } + + /// Returns `true` if all elements of `self` are contained in `other`. + pub fn is_subset(&self, other: &IndexSet) -> bool + where + S2: BuildHasher, + { + self.len() <= other.len() && self.iter().all(move |value| other.contains(value)) + } + + /// Returns `true` if all elements of `other` are contained in `self`. + pub fn is_superset(&self, other: &IndexSet) -> bool + where + S2: BuildHasher, + { + other.is_subset(self) + } +} + +impl BitAnd<&IndexSet> for &IndexSet +where + T: Eq + Hash + Clone, + S1: BuildHasher + Default, + S2: BuildHasher, +{ + type Output = IndexSet; + + /// Returns the set intersection, cloned into a new set. + /// + /// Values are collected in the same order that they appear in `self`. 
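A brief sketch of the comparison methods above; note that `PartialEq` is order-insensitive, since it is implemented via `is_subset` (hypothetical values):

```
use indexmap::IndexSet;

fn main() {
    let small = IndexSet::from([1, 2]);
    let big = IndexSet::from([2, 1, 3]);
    let other = IndexSet::from([9, 10]);

    assert!(small.is_subset(&big));
    assert!(big.is_superset(&small));
    assert!(small.is_disjoint(&other));

    // Equality ignores order: same elements, different insertion order.
    assert_eq!(IndexSet::from([1, 2, 3]), IndexSet::from([3, 2, 1]));
}
```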
+ fn bitand(self, other: &IndexSet) -> Self::Output { + self.intersection(other).cloned().collect() + } +} + +impl BitOr<&IndexSet> for &IndexSet +where + T: Eq + Hash + Clone, + S1: BuildHasher + Default, + S2: BuildHasher, +{ + type Output = IndexSet; + + /// Returns the set union, cloned into a new set. + /// + /// Values from `self` are collected in their original order, followed by + /// values that are unique to `other` in their original order. + fn bitor(self, other: &IndexSet) -> Self::Output { + self.union(other).cloned().collect() + } +} + +impl BitXor<&IndexSet> for &IndexSet +where + T: Eq + Hash + Clone, + S1: BuildHasher + Default, + S2: BuildHasher, +{ + type Output = IndexSet; + + /// Returns the set symmetric-difference, cloned into a new set. + /// + /// Values from `self` are collected in their original order, followed by + /// values from `other` in their original order. + fn bitxor(self, other: &IndexSet) -> Self::Output { + self.symmetric_difference(other).cloned().collect() + } +} + +impl Sub<&IndexSet> for &IndexSet +where + T: Eq + Hash + Clone, + S1: BuildHasher + Default, + S2: BuildHasher, +{ + type Output = IndexSet; + + /// Returns the set difference, cloned into a new set. + /// + /// Values are collected in the same order that they appear in `self`. + fn sub(self, other: &IndexSet) -> Self::Output { + self.difference(other).cloned().collect() + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/iter.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/iter.rs new file mode 100644 index 000000000000..31982760627c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/iter.rs @@ -0,0 +1,627 @@ +use super::{Bucket, Entries, IndexSet, Slice}; + +use alloc::vec::{self, Vec}; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::iter::{Chain, FusedIterator}; +use core::ops::RangeBounds; +use core::slice::Iter as SliceIter; + +impl<'a, T, S> IntoIterator for &'a IndexSet { + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for IndexSet { + type Item = T; + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter::new(self.into_entries()) + } +} + +/// An iterator over the items of an [`IndexSet`]. +/// +/// This `struct` is created by the [`IndexSet::iter`] method. +/// See its documentation for more. +pub struct Iter<'a, T> { + iter: SliceIter<'a, Bucket>, +} + +impl<'a, T> Iter<'a, T> { + pub(super) fn new(entries: &'a [Bucket]) -> Self { + Self { + iter: entries.iter(), + } + } + + /// Returns a slice of the remaining entries in the iterator. 
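A short sketch of the four operator impls above; each clones into a newly allocated set (hypothetical values):

```
use indexmap::IndexSet;

fn main() {
    let a = IndexSet::from([1, 2, 3]);
    let b = IndexSet::from([3, 4]);

    assert!((&a & &b).iter().eq(&[3]));          // intersection, in `a`'s order
    assert!((&a | &b).iter().eq(&[1, 2, 3, 4])); // union: `a`, then what's unique to `b`
    assert!((&a ^ &b).iter().eq(&[1, 2, 4]));    // symmetric difference
    assert!((&a - &b).iter().eq(&[1, 2]));       // difference
}
```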
+ pub fn as_slice(&self) -> &'a Slice { + Slice::from_slice(self.iter.as_slice()) + } +} + +impl<'a, T> Iterator for Iter<'a, T> { + type Item = &'a T; + + iterator_methods!(Bucket::key_ref); +} + +impl DoubleEndedIterator for Iter<'_, T> { + double_ended_iterator_methods!(Bucket::key_ref); +} + +impl ExactSizeIterator for Iter<'_, T> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Iter<'_, T> {} + +impl Clone for Iter<'_, T> { + fn clone(&self) -> Self { + Iter { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Iter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Default for Iter<'_, T> { + fn default() -> Self { + Self { iter: [].iter() } + } +} + +/// An owning iterator over the items of an [`IndexSet`]. +/// +/// This `struct` is created by the [`IndexSet::into_iter`] method +/// (provided by the [`IntoIterator`] trait). See its documentation for more. +#[derive(Clone)] +pub struct IntoIter { + iter: vec::IntoIter>, +} + +impl IntoIter { + pub(super) fn new(entries: Vec>) -> Self { + Self { + iter: entries.into_iter(), + } + } + + /// Returns a slice of the remaining entries in the iterator. + pub fn as_slice(&self) -> &Slice { + Slice::from_slice(self.iter.as_slice()) + } +} + +impl Iterator for IntoIter { + type Item = T; + + iterator_methods!(Bucket::key); +} + +impl DoubleEndedIterator for IntoIter { + double_ended_iterator_methods!(Bucket::key); +} + +impl ExactSizeIterator for IntoIter { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for IntoIter {} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +impl Default for IntoIter { + fn default() -> Self { + Self { + iter: Vec::new().into_iter(), + } + } +} + +/// A draining iterator over the items of an [`IndexSet`]. +/// +/// This `struct` is created by the [`IndexSet::drain`] method. +/// See its documentation for more. +pub struct Drain<'a, T> { + iter: vec::Drain<'a, Bucket>, +} + +impl<'a, T> Drain<'a, T> { + pub(super) fn new(iter: vec::Drain<'a, Bucket>) -> Self { + Self { iter } + } + + /// Returns a slice of the remaining entries in the iterator. + pub fn as_slice(&self) -> &Slice { + Slice::from_slice(self.iter.as_slice()) + } +} + +impl Iterator for Drain<'_, T> { + type Item = T; + + iterator_methods!(Bucket::key); +} + +impl DoubleEndedIterator for Drain<'_, T> { + double_ended_iterator_methods!(Bucket::key); +} + +impl ExactSizeIterator for Drain<'_, T> { + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Drain<'_, T> {} + +impl fmt::Debug for Drain<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = self.iter.as_slice().iter().map(Bucket::key_ref); + f.debug_list().entries(iter).finish() + } +} + +/// A lazy iterator producing elements in the difference of [`IndexSet`]s. +/// +/// This `struct` is created by the [`IndexSet::difference`] method. +/// See its documentation for more. 
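A small sketch of `drain` and the borrowing `Iter` adaptor above (hypothetical values):

```
use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<i32> = (0..6).collect();

    // Drain a sub-range: the removed values come back in order,
    // and the remaining values shift down to close the gap.
    let removed: Vec<i32> = set.drain(1..4).collect();
    assert_eq!(removed, vec![1, 2, 3]);
    assert!(set.iter().eq(&[0, 4, 5]));

    // `Iter` knows exactly how many values remain.
    let mut iter = set.iter();
    iter.next();
    assert_eq!(iter.len(), 2);
}
```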
+pub struct Difference<'a, T, S> { + iter: Iter<'a, T>, + other: &'a IndexSet, +} + +impl<'a, T, S> Difference<'a, T, S> { + pub(super) fn new(set: &'a IndexSet, other: &'a IndexSet) -> Self { + Self { + iter: set.iter(), + other, + } + } +} + +impl<'a, T, S> Iterator for Difference<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + while let Some(item) = self.iter.next() { + if !self.other.contains(item) { + return Some(item); + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } +} + +impl DoubleEndedIterator for Difference<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + while let Some(item) = self.iter.next_back() { + if !self.other.contains(item) { + return Some(item); + } + } + None + } +} + +impl FusedIterator for Difference<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl Clone for Difference<'_, T, S> { + fn clone(&self) -> Self { + Difference { + iter: self.iter.clone(), + ..*self + } + } +} + +impl fmt::Debug for Difference<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A lazy iterator producing elements in the intersection of [`IndexSet`]s. +/// +/// This `struct` is created by the [`IndexSet::intersection`] method. +/// See its documentation for more. +pub struct Intersection<'a, T, S> { + iter: Iter<'a, T>, + other: &'a IndexSet, +} + +impl<'a, T, S> Intersection<'a, T, S> { + pub(super) fn new(set: &'a IndexSet, other: &'a IndexSet) -> Self { + Self { + iter: set.iter(), + other, + } + } +} + +impl<'a, T, S> Iterator for Intersection<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + while let Some(item) = self.iter.next() { + if self.other.contains(item) { + return Some(item); + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + (0, self.iter.size_hint().1) + } +} + +impl DoubleEndedIterator for Intersection<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + while let Some(item) = self.iter.next_back() { + if self.other.contains(item) { + return Some(item); + } + } + None + } +} + +impl FusedIterator for Intersection<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl Clone for Intersection<'_, T, S> { + fn clone(&self) -> Self { + Intersection { + iter: self.iter.clone(), + ..*self + } + } +} + +impl fmt::Debug for Intersection<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A lazy iterator producing elements in the symmetric difference of [`IndexSet`]s. +/// +/// This `struct` is created by the [`IndexSet::symmetric_difference`] method. +/// See its documentation for more. 
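A sketch of how the lazy `Difference` and `Intersection` adaptors behave; items come out in the insertion order of the set they were created from, and nothing is allocated until the iterator is consumed:

use indexmap::IndexSet;

fn main() {
    let a: IndexSet<i32> = [5, 1, 4, 2].into_iter().collect();
    let b: IndexSet<i32> = [2, 5].into_iter().collect();

    // Difference: values of `a` not in `b`, in `a`'s order.
    let diff: Vec<&i32> = a.difference(&b).collect();
    assert_eq!(diff, [&1, &4]);

    // Intersection: values of `a` also in `b`, in `a`'s order.
    let inter: Vec<&i32> = a.intersection(&b).collect();
    assert_eq!(inter, [&5, &2]);
}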
+pub struct SymmetricDifference<'a, T, S1, S2> { + iter: Chain, Difference<'a, T, S1>>, +} + +impl<'a, T, S1, S2> SymmetricDifference<'a, T, S1, S2> +where + T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + pub(super) fn new(set1: &'a IndexSet, set2: &'a IndexSet) -> Self { + let diff1 = set1.difference(set2); + let diff2 = set2.difference(set1); + Self { + iter: diff1.chain(diff2), + } + } +} + +impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> +where + T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } +} + +impl DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2> +where + T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back() + } + + fn rfold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.rfold(init, f) + } +} + +impl FusedIterator for SymmetricDifference<'_, T, S1, S2> +where + T: Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ +} + +impl Clone for SymmetricDifference<'_, T, S1, S2> { + fn clone(&self) -> Self { + SymmetricDifference { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for SymmetricDifference<'_, T, S1, S2> +where + T: fmt::Debug + Eq + Hash, + S1: BuildHasher, + S2: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A lazy iterator producing elements in the union of [`IndexSet`]s. +/// +/// This `struct` is created by the [`IndexSet::union`] method. +/// See its documentation for more. +pub struct Union<'a, T, S> { + iter: Chain, Difference<'a, T, S>>, +} + +impl<'a, T, S> Union<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + pub(super) fn new(set1: &'a IndexSet, set2: &'a IndexSet) -> Self + where + S2: BuildHasher, + { + Self { + iter: set1.iter().chain(set2.difference(set1)), + } + } +} + +impl<'a, T, S> Iterator for Union<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.fold(init, f) + } +} + +impl DoubleEndedIterator for Union<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + self.iter.next_back() + } + + fn rfold(self, init: B, f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + self.iter.rfold(init, f) + } +} + +impl FusedIterator for Union<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl Clone for Union<'_, T, S> { + fn clone(&self) -> Self { + Union { + iter: self.iter.clone(), + } + } +} + +impl fmt::Debug for Union<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A splicing iterator for `IndexSet`. +/// +/// This `struct` is created by [`IndexSet::splice()`]. +/// See its documentation for more. 
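A sketch of `union` and `symmetric_difference`, which chain two of the adaptors above and are double-ended, under the same dependency assumption:

use indexmap::IndexSet;

fn main() {
    let a: IndexSet<u8> = [1, 2, 3].into_iter().collect();
    let b: IndexSet<u8> = [3, 4].into_iter().collect();

    // Union: all of `a` in order, then values unique to `b`.
    let forward: Vec<&u8> = a.union(&b).collect();
    assert_eq!(forward, [&1, &2, &3, &4]);

    // Both adaptors implement DoubleEndedIterator, so they can also be
    // consumed from the back.
    let backward: Vec<&u8> = a.symmetric_difference(&b).rev().collect();
    assert_eq!(backward, [&4, &2, &1]);
}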
+pub struct Splice<'a, I, T, S> +where + I: Iterator, + T: Hash + Eq, + S: BuildHasher, +{ + iter: crate::map::Splice<'a, UnitValue, T, (), S>, +} + +impl<'a, I, T, S> Splice<'a, I, T, S> +where + I: Iterator, + T: Hash + Eq, + S: BuildHasher, +{ + pub(super) fn new(set: &'a mut IndexSet, range: R, replace_with: I) -> Self + where + R: RangeBounds, + { + Self { + iter: set.map.splice(range, UnitValue(replace_with)), + } + } +} + +impl Iterator for Splice<'_, I, T, S> +where + I: Iterator, + T: Hash + Eq, + S: BuildHasher, +{ + type Item = T; + + fn next(&mut self) -> Option { + Some(self.iter.next()?.0) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl DoubleEndedIterator for Splice<'_, I, T, S> +where + I: Iterator, + T: Hash + Eq, + S: BuildHasher, +{ + fn next_back(&mut self) -> Option { + Some(self.iter.next_back()?.0) + } +} + +impl ExactSizeIterator for Splice<'_, I, T, S> +where + I: Iterator, + T: Hash + Eq, + S: BuildHasher, +{ + fn len(&self) -> usize { + self.iter.len() + } +} + +impl FusedIterator for Splice<'_, I, T, S> +where + I: Iterator, + T: Hash + Eq, + S: BuildHasher, +{ +} + +struct UnitValue(I); + +impl Iterator for UnitValue { + type Item = (I::Item, ()); + + fn next(&mut self) -> Option { + self.0.next().map(|x| (x, ())) + } +} + +impl<'a, I, T, S> fmt::Debug for Splice<'a, I, T, S> +where + I: fmt::Debug + Iterator, + T: fmt::Debug + Hash + Eq, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.iter, f) + } +} + +impl fmt::Debug for UnitValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.0, f) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/mutable.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/mutable.rs new file mode 100644 index 000000000000..21615f341ec2 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/mutable.rs @@ -0,0 +1,86 @@ +use core::hash::{BuildHasher, Hash}; + +use super::{Equivalent, IndexSet}; +use crate::map::MutableKeys; + +/// Opt-in mutable access to [`IndexSet`] values. +/// +/// These methods expose `&mut T`, mutable references to the value as it is stored +/// in the set. +/// You are allowed to modify the values in the set **if the modification +/// does not change the value’s hash and equality**. +/// +/// If values are modified erroneously, you can no longer look them up. +/// This is sound (memory safe) but a logical error hazard (just like +/// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). +/// +/// `use` this trait to enable its methods for `IndexSet`. +/// +/// This trait is sealed and cannot be implemented for types outside this crate. +pub trait MutableValues: private::Sealed { + type Value; + + /// Return item index and mutable reference to the value + /// + /// Computes in **O(1)** time (average). + fn get_full_mut2(&mut self, value: &Q) -> Option<(usize, &mut Self::Value)> + where + Q: ?Sized + Hash + Equivalent; + + /// Return mutable reference to the value at an index. + /// + /// Valid indices are `0 <= index < self.len()`. + /// + /// Computes in **O(1)** time. + fn get_index_mut2(&mut self, index: usize) -> Option<&mut Self::Value>; + + /// Scan through each value in the set and keep those where the + /// closure `keep` returns `true`. + /// + /// The values are visited in order, and remaining values keep their order. + /// + /// Computes in **O(n)** time (average). 
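A sketch of `IndexSet::splice`, which drives the `Splice` iterator above. It assumes the semantics described in the doc comment (replace a range, yield the removed values, like `Vec::splice`) and uses replacement values not already present in the set:

use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<i32> = [0, 1, 2, 3, 4].into_iter().collect();

    // Replace positions 1..3 with new values; the returned `Splice`
    // iterator yields the values removed from that range.
    let removed: Vec<i32> = set.splice(1..3, [10, 11, 12]).collect();
    assert_eq!(removed, [1, 2]);
    assert!(set.iter().eq([0, 10, 11, 12, 3, 4].iter()));
}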
+ fn retain2(&mut self, keep: F) + where + F: FnMut(&mut Self::Value) -> bool; +} + +/// Opt-in mutable access to [`IndexSet`] values. +/// +/// See [`MutableValues`] for more information. +impl MutableValues for IndexSet +where + S: BuildHasher, +{ + type Value = T; + + fn get_full_mut2(&mut self, value: &Q) -> Option<(usize, &mut T)> + where + Q: ?Sized + Hash + Equivalent, + { + match self.map.get_full_mut2(value) { + Some((index, value, ())) => Some((index, value)), + None => None, + } + } + + fn get_index_mut2(&mut self, index: usize) -> Option<&mut T> { + match self.map.get_index_mut2(index) { + Some((value, ())) => Some(value), + None => None, + } + } + + fn retain2(&mut self, mut keep: F) + where + F: FnMut(&mut T) -> bool, + { + self.map.retain2(move |value, ()| keep(value)); + } +} + +mod private { + pub trait Sealed {} + + impl Sealed for super::IndexSet {} +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/slice.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/slice.rs new file mode 100644 index 000000000000..f980e974468e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/slice.rs @@ -0,0 +1,340 @@ +use super::{Bucket, Entries, IndexSet, IntoIter, Iter}; +use crate::util::try_simplify_range; + +use alloc::boxed::Box; +use alloc::vec::Vec; +use core::cmp::Ordering; +use core::fmt; +use core::hash::{Hash, Hasher}; +use core::ops::{self, Bound, Index, RangeBounds}; + +/// A dynamically-sized slice of values in an [`IndexSet`]. +/// +/// This supports indexed operations much like a `[T]` slice, +/// but not any hashed operations on the values. +/// +/// Unlike `IndexSet`, `Slice` does consider the order for [`PartialEq`] +/// and [`Eq`], and it also implements [`PartialOrd`], [`Ord`], and [`Hash`]. +#[repr(transparent)] +pub struct Slice { + pub(crate) entries: [Bucket], +} + +// SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, +// and reference lifetimes are bound together in function signatures. +#[allow(unsafe_code)] +impl Slice { + pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { + unsafe { &*(entries as *const [Bucket] as *const Self) } + } + + pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { + unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } + } + + fn into_boxed(self: Box) -> Box<[Bucket]> { + unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } + } +} + +impl Slice { + pub(crate) fn into_entries(self: Box) -> Vec> { + self.into_boxed().into_vec() + } + + /// Returns an empty slice. + pub const fn new<'a>() -> &'a Self { + Self::from_slice(&[]) + } + + /// Return the number of elements in the set slice. + pub const fn len(&self) -> usize { + self.entries.len() + } + + /// Returns true if the set slice contains no elements. + pub const fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + /// Get a value by index. + /// + /// Valid indices are `0 <= index < self.len()`. + pub fn get_index(&self, index: usize) -> Option<&T> { + self.entries.get(index).map(Bucket::key_ref) + } + + /// Returns a slice of values in the given range of indices. + /// + /// Valid indices are `0 <= index < self.len()`. + pub fn get_range>(&self, range: R) -> Option<&Self> { + let range = try_simplify_range(range, self.entries.len())?; + self.entries.get(range).map(Self::from_slice) + } + + /// Get the first value. + pub fn first(&self) -> Option<&T> { + self.entries.first().map(Bucket::key_ref) + } + + /// Get the last value. 
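A sketch of the opt-in `MutableValues` trait implemented above. The `Counter` type here is hypothetical; its equality and hash ignore the mutated field, as the trait's safety note requires:

use core::hash::{Hash, Hasher};
use indexmap::set::MutableValues;
use indexmap::IndexSet;

// Hypothetical value type: `Eq`/`Hash` only look at `name`, so mutating
// `hits` in place cannot invalidate the set's hash table.
struct Counter {
    name: &'static str,
    hits: u32,
}

impl PartialEq for Counter {
    fn eq(&self, other: &Self) -> bool {
        self.name == other.name
    }
}
impl Eq for Counter {}
impl Hash for Counter {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.name.hash(state);
    }
}

fn main() {
    let mut set = IndexSet::new();
    set.insert(Counter { name: "requests", hits: 0 });

    // Only touch fields that do not participate in `Hash`/`Eq`.
    if let Some(counter) = set.get_index_mut2(0) {
        counter.hits += 1;
    }
    assert_eq!(set.get_index(0).unwrap().hits, 1);
}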
+ pub fn last(&self) -> Option<&T> { + self.entries.last().map(Bucket::key_ref) + } + + /// Divides one slice into two at an index. + /// + /// ***Panics*** if `index > len`. + pub fn split_at(&self, index: usize) -> (&Self, &Self) { + let (first, second) = self.entries.split_at(index); + (Self::from_slice(first), Self::from_slice(second)) + } + + /// Returns the first value and the rest of the slice, + /// or `None` if it is empty. + pub fn split_first(&self) -> Option<(&T, &Self)> { + if let [first, rest @ ..] = &self.entries { + Some((&first.key, Self::from_slice(rest))) + } else { + None + } + } + + /// Returns the last value and the rest of the slice, + /// or `None` if it is empty. + pub fn split_last(&self) -> Option<(&T, &Self)> { + if let [rest @ .., last] = &self.entries { + Some((&last.key, Self::from_slice(rest))) + } else { + None + } + } + + /// Return an iterator over the values of the set slice. + pub fn iter(&self) -> Iter<'_, T> { + Iter::new(&self.entries) + } + + /// Search over a sorted set for a value. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search`] for more details. + /// + /// Computes in **O(log(n))** time, which is notably less scalable than looking the value up in + /// the set this is a slice from using [`IndexSet::get_index_of`], but this can also position + /// missing values. + pub fn binary_search(&self, x: &T) -> Result + where + T: Ord, + { + self.binary_search_by(|p| p.cmp(x)) + } + + /// Search over a sorted set with a comparator function. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search_by`] for more details. + /// + /// Computes in **O(log(n))** time. + #[inline] + pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result + where + F: FnMut(&'a T) -> Ordering, + { + self.entries.binary_search_by(move |a| f(&a.key)) + } + + /// Search over a sorted set with an extraction function. + /// + /// Returns the position where that value is present, or the position where it can be inserted + /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. + /// + /// Computes in **O(log(n))** time. + #[inline] + pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result + where + F: FnMut(&'a T) -> B, + B: Ord, + { + self.binary_search_by(|k| f(k).cmp(b)) + } + + /// Returns the index of the partition point of a sorted set according to the given predicate + /// (the index of the first element of the second partition). + /// + /// See [`slice::partition_point`] for more details. + /// + /// Computes in **O(log(n))** time. + #[must_use] + pub fn partition_point
<P>
(&self, mut pred: P) -> usize + where + P: FnMut(&T) -> bool, + { + self.entries.partition_point(move |a| pred(&a.key)) + } +} + +impl<'a, T> IntoIterator for &'a Slice { + type IntoIter = Iter<'a, T>; + type Item = &'a T; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for Box> { + type IntoIter = IntoIter; + type Item = T; + + fn into_iter(self) -> Self::IntoIter { + IntoIter::new(self.into_entries()) + } +} + +impl Default for &'_ Slice { + fn default() -> Self { + Slice::from_slice(&[]) + } +} + +impl Default for Box> { + fn default() -> Self { + Slice::from_boxed(Box::default()) + } +} + +impl Clone for Box> { + fn clone(&self) -> Self { + Slice::from_boxed(self.entries.to_vec().into_boxed_slice()) + } +} + +impl From<&Slice> for Box> { + fn from(slice: &Slice) -> Self { + Slice::from_boxed(Box::from(&slice.entries)) + } +} + +impl fmt::Debug for Slice { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self).finish() + } +} + +impl PartialEq for Slice { + fn eq(&self, other: &Self) -> bool { + self.len() == other.len() && self.iter().eq(other) + } +} + +impl Eq for Slice {} + +impl PartialOrd for Slice { + fn partial_cmp(&self, other: &Self) -> Option { + self.iter().partial_cmp(other) + } +} + +impl Ord for Slice { + fn cmp(&self, other: &Self) -> Ordering { + self.iter().cmp(other) + } +} + +impl Hash for Slice { + fn hash(&self, state: &mut H) { + self.len().hash(state); + for value in self { + value.hash(state); + } + } +} + +impl Index for Slice { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + &self.entries[index].key + } +} + +// We can't have `impl> Index` because that conflicts with `Index`. +// Instead, we repeat the implementations for all the core range types. +macro_rules! 
impl_index { + ($($range:ty),*) => {$( + impl Index<$range> for IndexSet { + type Output = Slice; + + fn index(&self, range: $range) -> &Self::Output { + Slice::from_slice(&self.as_entries()[range]) + } + } + + impl Index<$range> for Slice { + type Output = Self; + + fn index(&self, range: $range) -> &Self::Output { + Slice::from_slice(&self.entries[range]) + } + } + )*} +} +impl_index!( + ops::Range, + ops::RangeFrom, + ops::RangeFull, + ops::RangeInclusive, + ops::RangeTo, + ops::RangeToInclusive, + (Bound, Bound) +); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn slice_index() { + fn check(vec_slice: &[i32], set_slice: &Slice, sub_slice: &Slice) { + assert_eq!(set_slice as *const _, sub_slice as *const _); + itertools::assert_equal(vec_slice, set_slice); + } + + let vec: Vec = (0..10).map(|i| i * i).collect(); + let set: IndexSet = vec.iter().cloned().collect(); + let slice = set.as_slice(); + + // RangeFull + check(&vec[..], &set[..], &slice[..]); + + for i in 0usize..10 { + // Index + assert_eq!(vec[i], set[i]); + assert_eq!(vec[i], slice[i]); + + // RangeFrom + check(&vec[i..], &set[i..], &slice[i..]); + + // RangeTo + check(&vec[..i], &set[..i], &slice[..i]); + + // RangeToInclusive + check(&vec[..=i], &set[..=i], &slice[..=i]); + + // (Bound, Bound) + let bounds = (Bound::Excluded(i), Bound::Unbounded); + check(&vec[i + 1..], &set[bounds], &slice[bounds]); + + for j in i..=10 { + // Range + check(&vec[i..j], &set[i..j], &slice[i..j]); + } + + for j in i..10 { + // RangeInclusive + check(&vec[i..=j], &set[i..=j], &slice[i..=j]); + } + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/tests.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/tests.rs new file mode 100644 index 000000000000..35a076e8de34 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/set/tests.rs @@ -0,0 +1,723 @@ +use super::*; +use std::string::String; + +#[test] +fn it_works() { + let mut set = IndexSet::new(); + assert_eq!(set.is_empty(), true); + set.insert(1); + set.insert(1); + assert_eq!(set.len(), 1); + assert!(set.get(&1).is_some()); + assert_eq!(set.is_empty(), false); +} + +#[test] +fn new() { + let set = IndexSet::::new(); + println!("{:?}", set); + assert_eq!(set.capacity(), 0); + assert_eq!(set.len(), 0); + assert_eq!(set.is_empty(), true); +} + +#[test] +fn insert() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5]; + let not_present = [1, 3, 6, 9, 10]; + let mut set = IndexSet::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(set.len(), i); + set.insert(elt); + assert_eq!(set.len(), i + 1); + assert_eq!(set.get(&elt), Some(&elt)); + } + println!("{:?}", set); + + for &elt in ¬_present { + assert!(set.get(&elt).is_none()); + } +} + +#[test] +fn insert_full() { + let insert = vec![9, 2, 7, 1, 4, 6, 13]; + let present = vec![1, 6, 2]; + let mut set = IndexSet::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(set.len(), i); + let (index, success) = set.insert_full(elt); + assert!(success); + assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); + assert_eq!(set.len(), i + 1); + } + + let len = set.len(); + for &elt in &present { + let (index, success) = set.insert_full(elt); + assert!(!success); + assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); + assert_eq!(set.len(), len); + } +} + +#[test] +fn insert_2() { + let mut set = IndexSet::with_capacity(16); + + let mut values = vec![]; + values.extend(0..16); + 
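A sketch of the range-indexing impls generated by `impl_index!` above, assuming `indexmap` as a dependency; indexing with a range returns a `Slice` view:

use indexmap::IndexSet;

fn main() {
    let set: IndexSet<i32> = (0..10).map(|i| i * i).collect();

    // A range index yields a `Slice`, which supports positional operations
    // (indexing, first/last, binary search) but no hashed lookups.
    let slice = &set[2..5];
    assert_eq!(slice.len(), 3);
    assert_eq!(slice.first(), Some(&4));
    assert_eq!(slice[2], 16);

    // `get_range` is the non-panicking variant.
    assert!(set.as_slice().get_range(8..20).is_none());
}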
values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); + + for &i in &values { + let old_set = set.clone(); + set.insert(i); + for value in old_set.iter() { + if set.get(value).is_none() { + println!("old_set: {:?}", old_set); + println!("set: {:?}", set); + panic!("did not find {} in set", value); + } + } + } + + for &i in &values { + assert!(set.get(&i).is_some(), "did not find {}", i); + } +} + +#[test] +fn insert_dup() { + let mut elements = vec![0, 2, 4, 6, 8]; + let mut set: IndexSet = elements.drain(..).collect(); + { + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } + { + let inserted = set.insert(0); + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(inserted, false); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } +} + +#[test] +fn insert_order() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), insert.len()); + for (a, b) in insert.iter().zip(set.iter()) { + assert_eq!(a, b); + } + for (i, v) in (0..insert.len()).zip(set.iter()) { + assert_eq!(set.get_index(i).unwrap(), v); + } +} + +#[test] +fn shift_insert() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.shift_insert(0, elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), insert.len()); + for (a, b) in insert.iter().rev().zip(set.iter()) { + assert_eq!(a, b); + } + for (i, v) in (0..insert.len()).zip(set.iter()) { + assert_eq!(set.get_index(i).unwrap(), v); + } + + // "insert" that moves an existing entry + set.shift_insert(0, insert[0]); + assert_eq!(set.iter().count(), insert.len()); + assert_eq!(insert[0], set[0]); + for (a, b) in insert[1..].iter().rev().zip(set.iter().skip(1)) { + assert_eq!(a, b); + } +} + +#[test] +fn replace() { + let replace = [0, 4, 2, 12, 8, 7, 11, 5]; + let not_present = [1, 3, 6, 9, 10]; + let mut set = IndexSet::with_capacity(replace.len()); + + for (i, &elt) in replace.iter().enumerate() { + assert_eq!(set.len(), i); + set.replace(elt); + assert_eq!(set.len(), i + 1); + assert_eq!(set.get(&elt), Some(&elt)); + } + println!("{:?}", set); + + for &elt in ¬_present { + assert!(set.get(&elt).is_none()); + } +} + +#[test] +fn replace_full() { + let replace = vec![9, 2, 7, 1, 4, 6, 13]; + let present = vec![1, 6, 2]; + let mut set = IndexSet::with_capacity(replace.len()); + + for (i, &elt) in replace.iter().enumerate() { + assert_eq!(set.len(), i); + let (index, replaced) = set.replace_full(elt); + assert!(replaced.is_none()); + assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); + assert_eq!(set.len(), i + 1); + } + + let len = set.len(); + for &elt in &present { + let (index, replaced) = set.replace_full(elt); + assert_eq!(Some(elt), replaced); + assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); + assert_eq!(set.len(), len); + } +} + +#[test] +fn replace_2() { + let mut set = IndexSet::with_capacity(16); + + let mut values = vec![]; + values.extend(0..16); + values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); + + for &i in &values { + let old_set = set.clone(); + set.replace(i); + for value in old_set.iter() { + if set.get(value).is_none() { + println!("old_set: {:?}", old_set); + println!("set: {:?}", set); + panic!("did not find {} in set", value); + } + } + } + + for &i in &values { + 
assert!(set.get(&i).is_some(), "did not find {}", i); + } +} + +#[test] +fn replace_dup() { + let mut elements = vec![0, 2, 4, 6, 8]; + let mut set: IndexSet = elements.drain(..).collect(); + { + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } + { + let replaced = set.replace(0); + let (i, v) = set.get_full(&0).unwrap(); + assert_eq!(set.len(), 5); + assert_eq!(replaced, Some(0)); + assert_eq!(i, 0); + assert_eq!(*v, 0); + } +} + +#[test] +fn replace_order() { + let replace = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &replace { + set.replace(elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), replace.len()); + for (a, b) in replace.iter().zip(set.iter()) { + assert_eq!(a, b); + } + for (i, v) in (0..replace.len()).zip(set.iter()) { + assert_eq!(set.get_index(i).unwrap(), v); + } +} + +#[test] +fn replace_change() { + // Check pointers to make sure it really changes + let mut set = indexset!(vec![42]); + let old_ptr = set[0].as_ptr(); + let new = set[0].clone(); + let new_ptr = new.as_ptr(); + assert_ne!(old_ptr, new_ptr); + let replaced = set.replace(new).unwrap(); + assert_eq!(replaced.as_ptr(), old_ptr); +} + +#[test] +fn grow() { + let insert = [0, 4, 2, 12, 8, 7, 11]; + let not_present = [1, 3, 6, 9, 10]; + let mut set = IndexSet::with_capacity(insert.len()); + + for (i, &elt) in insert.iter().enumerate() { + assert_eq!(set.len(), i); + set.insert(elt); + assert_eq!(set.len(), i + 1); + assert_eq!(set.get(&elt), Some(&elt)); + } + + println!("{:?}", set); + for &elt in &insert { + set.insert(elt * 10); + } + for &elt in &insert { + set.insert(elt * 100); + } + for (i, &elt) in insert.iter().cycle().enumerate().take(100) { + set.insert(elt * 100 + i as i32); + } + println!("{:?}", set); + for &elt in ¬_present { + assert!(set.get(&elt).is_none()); + } +} + +#[test] +fn reserve() { + let mut set = IndexSet::::new(); + assert_eq!(set.capacity(), 0); + set.reserve(100); + let capacity = set.capacity(); + assert!(capacity >= 100); + for i in 0..capacity { + assert_eq!(set.len(), i); + set.insert(i); + assert_eq!(set.len(), i + 1); + assert_eq!(set.capacity(), capacity); + assert_eq!(set.get(&i), Some(&i)); + } + set.insert(capacity); + assert_eq!(set.len(), capacity + 1); + assert!(set.capacity() > capacity); + assert_eq!(set.get(&capacity), Some(&capacity)); +} + +#[test] +fn try_reserve() { + let mut set = IndexSet::::new(); + assert_eq!(set.capacity(), 0); + assert_eq!(set.try_reserve(100), Ok(())); + assert!(set.capacity() >= 100); + assert!(set.try_reserve(usize::MAX).is_err()); +} + +#[test] +fn shrink_to_fit() { + let mut set = IndexSet::::new(); + assert_eq!(set.capacity(), 0); + for i in 0..100 { + assert_eq!(set.len(), i); + set.insert(i); + assert_eq!(set.len(), i + 1); + assert!(set.capacity() >= i + 1); + assert_eq!(set.get(&i), Some(&i)); + set.shrink_to_fit(); + assert_eq!(set.len(), i + 1); + assert_eq!(set.capacity(), i + 1); + assert_eq!(set.get(&i), Some(&i)); + } +} + +#[test] +fn remove() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + assert_eq!(set.iter().count(), set.len()); + assert_eq!(set.iter().count(), insert.len()); + for (a, b) in insert.iter().zip(set.iter()) { + assert_eq!(a, b); + } + + let remove_fail = [99, 77]; + let remove = [4, 12, 8, 7]; + + for &value in &remove_fail { + 
assert!(set.swap_remove_full(&value).is_none()); + } + println!("{:?}", set); + for &value in &remove { + //println!("{:?}", set); + let index = set.get_full(&value).unwrap().0; + assert_eq!(set.swap_remove_full(&value), Some((index, value))); + } + println!("{:?}", set); + + for value in &insert { + assert_eq!(set.get(value).is_some(), !remove.contains(value)); + } + assert_eq!(set.len(), insert.len() - remove.len()); + assert_eq!(set.iter().count(), insert.len() - remove.len()); +} + +#[test] +fn swap_remove_index() { + let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; + let mut set = IndexSet::new(); + + for &elt in &insert { + set.insert(elt); + } + + let mut vector = insert.to_vec(); + let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; + + // check that the same swap remove sequence on vec and set + // have the same result. + for &rm in remove_sequence { + let out_vec = vector.swap_remove(rm); + let out_set = set.swap_remove_index(rm).unwrap(); + assert_eq!(out_vec, out_set); + } + assert_eq!(vector.len(), set.len()); + for (a, b) in vector.iter().zip(set.iter()) { + assert_eq!(a, b); + } +} + +#[test] +fn partial_eq_and_eq() { + let mut set_a = IndexSet::new(); + set_a.insert(1); + set_a.insert(2); + let mut set_b = set_a.clone(); + assert_eq!(set_a, set_b); + set_b.swap_remove(&1); + assert_ne!(set_a, set_b); + + let set_c: IndexSet<_> = set_b.into_iter().collect(); + assert_ne!(set_a, set_c); + assert_ne!(set_c, set_a); +} + +#[test] +fn extend() { + let mut set = IndexSet::new(); + set.extend(vec![&1, &2, &3, &4]); + set.extend(vec![5, 6]); + assert_eq!(set.into_iter().collect::>(), vec![1, 2, 3, 4, 5, 6]); +} + +#[test] +fn comparisons() { + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).collect(); + + assert!(!set_a.is_disjoint(&set_a)); + assert!(set_a.is_subset(&set_a)); + assert!(set_a.is_superset(&set_a)); + + assert!(set_a.is_disjoint(&set_b)); + assert!(set_b.is_disjoint(&set_a)); + assert!(!set_a.is_subset(&set_b)); + assert!(!set_b.is_subset(&set_a)); + assert!(!set_a.is_superset(&set_b)); + assert!(!set_b.is_superset(&set_a)); + + assert!(!set_a.is_disjoint(&set_c)); + assert!(!set_c.is_disjoint(&set_a)); + assert!(set_a.is_subset(&set_c)); + assert!(!set_c.is_subset(&set_a)); + assert!(!set_a.is_superset(&set_c)); + assert!(set_c.is_superset(&set_a)); + + assert!(!set_c.is_disjoint(&set_d)); + assert!(!set_d.is_disjoint(&set_c)); + assert!(!set_c.is_subset(&set_d)); + assert!(!set_d.is_subset(&set_c)); + assert!(!set_c.is_superset(&set_d)); + assert!(!set_d.is_superset(&set_c)); +} + +#[test] +fn iter_comparisons() { + use std::iter::empty; + + fn check<'a, I1, I2>(iter1: I1, iter2: I2) + where + I1: Iterator, + I2: Iterator, + { + assert!(iter1.copied().eq(iter2)); + } + + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).rev().collect(); + + check(set_a.difference(&set_a), empty()); + check(set_a.symmetric_difference(&set_a), empty()); + check(set_a.intersection(&set_a), 0..3); + check(set_a.union(&set_a), 0..3); + + check(set_a.difference(&set_b), 0..3); + check(set_b.difference(&set_a), 3..6); + check(set_a.symmetric_difference(&set_b), 0..6); + check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3)); + check(set_a.intersection(&set_b), empty()); + check(set_b.intersection(&set_a), empty()); + 
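A sketch contrasting `swap_remove_full` (exercised by the removal tests above) with `shift_remove`:

use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<i32> = [10, 20, 30, 40].into_iter().collect();

    // `swap_remove` behaves like `Vec::swap_remove`: the last value fills
    // the vacated slot, so it is O(1) but perturbs the order.
    assert_eq!(set.swap_remove_full(&20), Some((1, 20)));
    assert!(set.iter().eq([10, 40, 30].iter()));

    // `shift_remove` preserves the relative order of the remaining values
    // by shifting everything after the removed index.
    assert!(set.shift_remove(&40));
    assert!(set.iter().eq([10, 30].iter()));
}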
check(set_a.union(&set_b), 0..6); + check(set_b.union(&set_a), (3..6).chain(0..3)); + + check(set_a.difference(&set_c), empty()); + check(set_c.difference(&set_a), 3..6); + check(set_a.symmetric_difference(&set_c), 3..6); + check(set_c.symmetric_difference(&set_a), 3..6); + check(set_a.intersection(&set_c), 0..3); + check(set_c.intersection(&set_a), 0..3); + check(set_a.union(&set_c), 0..6); + check(set_c.union(&set_a), 0..6); + + check(set_c.difference(&set_d), 0..3); + check(set_d.difference(&set_c), (6..9).rev()); + check( + set_c.symmetric_difference(&set_d), + (0..3).chain((6..9).rev()), + ); + check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3)); + check(set_c.intersection(&set_d), 3..6); + check(set_d.intersection(&set_c), (3..6).rev()); + check(set_c.union(&set_d), (0..6).chain((6..9).rev())); + check(set_d.union(&set_c), (3..9).rev().chain(0..3)); +} + +#[test] +fn ops() { + let empty = IndexSet::::new(); + let set_a: IndexSet<_> = (0..3).collect(); + let set_b: IndexSet<_> = (3..6).collect(); + let set_c: IndexSet<_> = (0..6).collect(); + let set_d: IndexSet<_> = (3..9).rev().collect(); + + #[allow(clippy::eq_op)] + { + assert_eq!(&set_a & &set_a, set_a); + assert_eq!(&set_a | &set_a, set_a); + assert_eq!(&set_a ^ &set_a, empty); + assert_eq!(&set_a - &set_a, empty); + } + + assert_eq!(&set_a & &set_b, empty); + assert_eq!(&set_b & &set_a, empty); + assert_eq!(&set_a | &set_b, set_c); + assert_eq!(&set_b | &set_a, set_c); + assert_eq!(&set_a ^ &set_b, set_c); + assert_eq!(&set_b ^ &set_a, set_c); + assert_eq!(&set_a - &set_b, set_a); + assert_eq!(&set_b - &set_a, set_b); + + assert_eq!(&set_a & &set_c, set_a); + assert_eq!(&set_c & &set_a, set_a); + assert_eq!(&set_a | &set_c, set_c); + assert_eq!(&set_c | &set_a, set_c); + assert_eq!(&set_a ^ &set_c, set_b); + assert_eq!(&set_c ^ &set_a, set_b); + assert_eq!(&set_a - &set_c, empty); + assert_eq!(&set_c - &set_a, set_b); + + assert_eq!(&set_c & &set_d, set_b); + assert_eq!(&set_d & &set_c, set_b); + assert_eq!(&set_c | &set_d, &set_a | &set_d); + assert_eq!(&set_d | &set_c, &set_a | &set_d); + assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b)); + assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b)); + assert_eq!(&set_c - &set_d, set_a); + assert_eq!(&set_d - &set_c, &set_d - &set_b); +} + +#[test] +#[cfg(feature = "std")] +fn from_array() { + let set1 = IndexSet::from([1, 2, 3, 4]); + let set2: IndexSet<_> = [1, 2, 3, 4].into(); + + assert_eq!(set1, set2); +} + +#[test] +fn iter_default() { + struct Item; + fn assert_default() + where + T: Default + Iterator, + { + assert!(T::default().next().is_none()); + } + assert_default::>(); + assert_default::>(); +} + +#[test] +fn test_binary_search_by() { + // adapted from std's test for binary_search + let b: IndexSet = [].into(); + assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(0)); + + let b: IndexSet = [4].into(); + assert_eq!(b.binary_search_by(|x| x.cmp(&3)), Err(0)); + assert_eq!(b.binary_search_by(|x| x.cmp(&4)), Ok(0)); + assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(1)); + + let b: IndexSet = [1, 2, 4, 6, 8, 9].into(); + assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(3)); + assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Ok(3)); + assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Err(4)); + assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Ok(4)); + + let b: IndexSet = [1, 2, 4, 5, 6, 8].into(); + assert_eq!(b.binary_search_by(|x| x.cmp(&9)), Err(6)); + + let b: IndexSet = [1, 2, 4, 6, 7, 8, 9].into(); + assert_eq!(b.binary_search_by(|x| x.cmp(&6)), 
Ok(3)); + assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(3)); + assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Ok(5)); + + let b: IndexSet = [1, 2, 4, 5, 6, 8, 9].into(); + assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Err(5)); + assert_eq!(b.binary_search_by(|x| x.cmp(&0)), Err(0)); + + let b: IndexSet = [1, 3, 3, 3, 7].into(); + assert_eq!(b.binary_search_by(|x| x.cmp(&0)), Err(0)); + assert_eq!(b.binary_search_by(|x| x.cmp(&1)), Ok(0)); + assert_eq!(b.binary_search_by(|x| x.cmp(&2)), Err(1)); + // diff from std as set merges the duplicate keys + assert!(match b.binary_search_by(|x| x.cmp(&3)) { + Ok(1..=2) => true, + _ => false, + }); + assert!(match b.binary_search_by(|x| x.cmp(&3)) { + Ok(1..=2) => true, + _ => false, + }); + assert_eq!(b.binary_search_by(|x| x.cmp(&4)), Err(2)); + assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(2)); + assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Err(2)); + assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Ok(2)); + assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Err(3)); +} + +#[test] +fn test_binary_search_by_key() { + // adapted from std's test for binary_search + let b: IndexSet = [].into(); + assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(0)); + + let b: IndexSet = [4].into(); + assert_eq!(b.binary_search_by_key(&3, |&x| x), Err(0)); + assert_eq!(b.binary_search_by_key(&4, |&x| x), Ok(0)); + assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(1)); + + let b: IndexSet = [1, 2, 4, 6, 8, 9].into(); + assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(3)); + assert_eq!(b.binary_search_by_key(&6, |&x| x), Ok(3)); + assert_eq!(b.binary_search_by_key(&7, |&x| x), Err(4)); + assert_eq!(b.binary_search_by_key(&8, |&x| x), Ok(4)); + + let b: IndexSet = [1, 2, 4, 5, 6, 8].into(); + assert_eq!(b.binary_search_by_key(&9, |&x| x), Err(6)); + + let b: IndexSet = [1, 2, 4, 6, 7, 8, 9].into(); + assert_eq!(b.binary_search_by_key(&6, |&x| x), Ok(3)); + assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(3)); + assert_eq!(b.binary_search_by_key(&8, |&x| x), Ok(5)); + + let b: IndexSet = [1, 2, 4, 5, 6, 8, 9].into(); + assert_eq!(b.binary_search_by_key(&7, |&x| x), Err(5)); + assert_eq!(b.binary_search_by_key(&0, |&x| x), Err(0)); + + let b: IndexSet = [1, 3, 3, 3, 7].into(); + assert_eq!(b.binary_search_by_key(&0, |&x| x), Err(0)); + assert_eq!(b.binary_search_by_key(&1, |&x| x), Ok(0)); + assert_eq!(b.binary_search_by_key(&2, |&x| x), Err(1)); + // diff from std as set merges the duplicate keys + assert!(match b.binary_search_by_key(&3, |&x| x) { + Ok(1..=2) => true, + _ => false, + }); + assert!(match b.binary_search_by_key(&3, |&x| x) { + Ok(1..=2) => true, + _ => false, + }); + assert_eq!(b.binary_search_by_key(&4, |&x| x), Err(2)); + assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(2)); + assert_eq!(b.binary_search_by_key(&6, |&x| x), Err(2)); + assert_eq!(b.binary_search_by_key(&7, |&x| x), Ok(2)); + assert_eq!(b.binary_search_by_key(&8, |&x| x), Err(3)); +} + +#[test] +fn test_partition_point() { + // adapted from std's test for partition_point + let b: IndexSet = [].into(); + assert_eq!(b.partition_point(|&x| x < 5), 0); + + let b: IndexSet<_> = [4].into(); + assert_eq!(b.partition_point(|&x| x < 3), 0); + assert_eq!(b.partition_point(|&x| x < 4), 0); + assert_eq!(b.partition_point(|&x| x < 5), 1); + + let b: IndexSet<_> = [1, 2, 4, 6, 8, 9].into(); + assert_eq!(b.partition_point(|&x| x < 5), 3); + assert_eq!(b.partition_point(|&x| x < 6), 3); + assert_eq!(b.partition_point(|&x| x < 7), 4); + assert_eq!(b.partition_point(|&x| x < 8), 4); + + let b: 
IndexSet<_> = [1, 2, 4, 5, 6, 8].into(); + assert_eq!(b.partition_point(|&x| x < 9), 6); + + let b: IndexSet<_> = [1, 2, 4, 6, 7, 8, 9].into(); + assert_eq!(b.partition_point(|&x| x < 6), 3); + assert_eq!(b.partition_point(|&x| x < 5), 3); + assert_eq!(b.partition_point(|&x| x < 8), 5); + + let b: IndexSet<_> = [1, 2, 4, 5, 6, 8, 9].into(); + assert_eq!(b.partition_point(|&x| x < 7), 5); + assert_eq!(b.partition_point(|&x| x < 0), 0); + + let b: IndexSet<_> = [1, 3, 3, 3, 7].into(); + assert_eq!(b.partition_point(|&x| x < 0), 0); + assert_eq!(b.partition_point(|&x| x < 1), 0); + assert_eq!(b.partition_point(|&x| x < 2), 1); + assert_eq!(b.partition_point(|&x| x < 3), 1); + assert_eq!(b.partition_point(|&x| x < 4), 2); // diff from std as set merges the duplicate keys + assert_eq!(b.partition_point(|&x| x < 5), 2); + assert_eq!(b.partition_point(|&x| x < 6), 2); + assert_eq!(b.partition_point(|&x| x < 7), 2); + assert_eq!(b.partition_point(|&x| x < 8), 3); +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/util.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/util.rs new file mode 100644 index 000000000000..377ff516f0db --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/src/util.rs @@ -0,0 +1,53 @@ +use core::ops::{Bound, Range, RangeBounds}; + +pub(crate) fn third(t: (A, B, C)) -> C { + t.2 +} + +pub(crate) fn simplify_range(range: R, len: usize) -> Range +where + R: RangeBounds, +{ + let start = match range.start_bound() { + Bound::Unbounded => 0, + Bound::Included(&i) if i <= len => i, + Bound::Excluded(&i) if i < len => i + 1, + bound => panic!("range start {:?} should be <= length {}", bound, len), + }; + let end = match range.end_bound() { + Bound::Unbounded => len, + Bound::Excluded(&i) if i <= len => i, + Bound::Included(&i) if i < len => i + 1, + bound => panic!("range end {:?} should be <= length {}", bound, len), + }; + if start > end { + panic!( + "range start {:?} should be <= range end {:?}", + range.start_bound(), + range.end_bound() + ); + } + start..end +} + +pub(crate) fn try_simplify_range(range: R, len: usize) -> Option> +where + R: RangeBounds, +{ + let start = match range.start_bound() { + Bound::Unbounded => 0, + Bound::Included(&i) if i <= len => i, + Bound::Excluded(&i) if i < len => i + 1, + _ => return None, + }; + let end = match range.end_bound() { + Bound::Unbounded => len, + Bound::Excluded(&i) if i <= len => i, + Bound::Included(&i) if i < len => i + 1, + _ => return None, + }; + if start > end { + return None; + } + Some(start..end) +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/equivalent_trait.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/equivalent_trait.rs new file mode 100644 index 000000000000..ff5943a3edb8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/equivalent_trait.rs @@ -0,0 +1,53 @@ +use indexmap::indexmap; +use indexmap::Equivalent; + +use std::hash::Hash; + +#[derive(Debug, Hash)] +pub struct Pair(pub A, pub B); + +impl PartialEq<(A, B)> for Pair +where + C: PartialEq, + D: PartialEq, +{ + fn eq(&self, rhs: &(A, B)) -> bool { + self.0 == rhs.0 && self.1 == rhs.1 + } +} + +impl Equivalent for Pair +where + Pair: PartialEq, + A: Hash + Eq, + B: Hash + Eq, +{ + fn equivalent(&self, other: &X) -> bool { + *self == *other + } +} + +#[test] +fn test_lookup() { + let s = String::from; + let map = indexmap! 
{ + (s("a"), s("b")) => 1, + (s("a"), s("x")) => 2, + }; + + assert!(map.contains_key(&Pair("a", "b"))); + assert!(!map.contains_key(&Pair("b", "a"))); +} + +#[test] +fn test_string_str() { + let s = String::from; + let mut map = indexmap! { + s("a") => 1, s("b") => 2, + s("x") => 3, s("y") => 4, + }; + + assert!(map.contains_key("a")); + assert!(!map.contains_key("z")); + assert_eq!(map.swap_remove("b"), Some(2)); +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/macros_full_path.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/macros_full_path.rs new file mode 100644 index 000000000000..2467d9b4f5d4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/macros_full_path.rs @@ -0,0 +1,19 @@ +#[test] +fn test_create_map() { + let _m = indexmap::indexmap! { + 1 => 2, + 7 => 1, + 2 => 2, + 3 => 3, + }; +} + +#[test] +fn test_create_set() { + let _s = indexmap::indexset! { + 1, + 7, + 2, + 3, + }; +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/quick.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/quick.rs new file mode 100644 index 000000000000..56afee7239f3 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/quick.rs @@ -0,0 +1,759 @@ +use indexmap::{IndexMap, IndexSet}; +use itertools::Itertools; + +use quickcheck::Arbitrary; +use quickcheck::Gen; +use quickcheck::QuickCheck; +use quickcheck::TestResult; + +use fnv::FnvHasher; +use std::hash::{BuildHasher, BuildHasherDefault}; +type FnvBuilder = BuildHasherDefault; +type IndexMapFnv = IndexMap; + +use std::cmp::min; +use std::collections::HashMap; +use std::collections::HashSet; +use std::fmt::Debug; +use std::hash::Hash; +use std::ops::Bound; +use std::ops::Deref; + +use indexmap::map::Entry; +use std::collections::hash_map::Entry as StdEntry; + +fn set<'a, T: 'a, I>(iter: I) -> HashSet +where + I: IntoIterator, + T: Copy + Hash + Eq, +{ + iter.into_iter().copied().collect() +} + +fn indexmap<'a, T: 'a, I>(iter: I) -> IndexMap +where + I: IntoIterator, + T: Copy + Hash + Eq, +{ + IndexMap::from_iter(iter.into_iter().copied().map(|k| (k, ()))) +} + +// Helper macro to allow us to use smaller quickcheck limits under miri. +macro_rules! quickcheck_limit { + (@as_items $($i:item)*) => ($($i)*); + { + $( + $(#[$m:meta])* + fn $fn_name:ident($($arg_name:ident : $arg_ty:ty),*) -> $ret:ty { + $($code:tt)* + } + )* + } => ( + quickcheck::quickcheck! { + @as_items + $( + #[test] + $(#[$m])* + fn $fn_name() { + fn prop($($arg_name: $arg_ty),*) -> $ret { + $($code)* + } + let mut quickcheck = QuickCheck::new(); + if cfg!(miri) { + quickcheck = quickcheck + .gen(Gen::new(10)) + .tests(10) + .max_tests(100); + } + + quickcheck.quickcheck(prop as fn($($arg_ty),*) -> $ret); + } + )* + } + ) +} + +quickcheck_limit! 
{ + fn contains(insert: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + insert.iter().all(|&key| map.get(&key).is_some()) + } + + fn contains_not(insert: Vec, not: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + let nots = &set(¬) - &set(&insert); + nots.iter().all(|&key| map.get(&key).is_none()) + } + + fn insert_remove(insert: Vec, remove: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + for &key in &remove { + map.swap_remove(&key); + } + let elements = &set(&insert) - &set(&remove); + map.len() == elements.len() && map.iter().count() == elements.len() && + elements.iter().all(|k| map.get(k).is_some()) + } + + fn insertion_order(insert: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + itertools::assert_equal(insert.iter().unique(), map.keys()); + true + } + + fn insert_sorted(insert: Vec<(u32, u32)>) -> bool { + let mut hmap = HashMap::new(); + let mut map = IndexMap::new(); + let mut map2 = IndexMap::new(); + for &(key, value) in &insert { + hmap.insert(key, value); + map.insert_sorted(key, value); + match map2.entry(key) { + Entry::Occupied(e) => *e.into_mut() = value, + Entry::Vacant(e) => { e.insert_sorted(value); } + } + } + itertools::assert_equal(hmap.iter().sorted(), &map); + itertools::assert_equal(&map, &map2); + true + } + + fn pop(insert: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + let mut pops = Vec::new(); + while let Some((key, _v)) = map.pop() { + pops.push(key); + } + pops.reverse(); + + itertools::assert_equal(insert.iter().unique(), &pops); + true + } + + fn with_cap(template: Vec<()>) -> bool { + let cap = template.len(); + let map: IndexMap = IndexMap::with_capacity(cap); + println!("wish: {}, got: {} (diff: {})", cap, map.capacity(), map.capacity() as isize - cap as isize); + map.capacity() >= cap + } + + fn drain_full(insert: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + let mut clone = map.clone(); + let drained = clone.drain(..); + for (key, _) in drained { + map.swap_remove(&key); + } + map.is_empty() + } + + fn drain_bounds(insert: Vec, range: (Bound, Bound)) -> TestResult { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + + // First see if `Vec::drain` is happy with this range. + let result = std::panic::catch_unwind(|| { + let mut keys: Vec = map.keys().copied().collect(); + keys.drain(range); + keys + }); + + if let Ok(keys) = result { + map.drain(range); + // Check that our `drain` matches the same key order. + assert!(map.keys().eq(&keys)); + // Check that hash lookups all work too. + assert!(keys.iter().all(|key| map.contains_key(key))); + TestResult::passed() + } else { + // If `Vec::drain` panicked, so should we. 
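A standalone sketch of the kind of property these tests encode, written against the public `quickcheck` API rather than the vendored `quickcheck_limit!` wrapper; the builder calls mirror the ones used in that macro:

use indexmap::IndexSet;
use quickcheck::{Gen, QuickCheck};

// Property: an IndexSet iterates values in first-insertion order,
// regardless of duplicates in the input.
fn insertion_order(insert: Vec<u8>) -> bool {
    let set: IndexSet<u8> = insert.iter().copied().collect();
    let mut deduped = Vec::new();
    for &x in &insert {
        if !deduped.contains(&x) {
            deduped.push(x);
        }
    }
    set.iter().copied().eq(deduped)
}

fn main() {
    QuickCheck::new()
        .gen(Gen::new(64))
        .tests(200)
        .quickcheck(insertion_order as fn(Vec<u8>) -> bool);
}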
+ TestResult::must_fail(move || { map.drain(range); }) + } + } + + fn shift_remove(insert: Vec, remove: Vec) -> bool { + let mut map = IndexMap::new(); + for &key in &insert { + map.insert(key, ()); + } + for &key in &remove { + map.shift_remove(&key); + } + let elements = &set(&insert) - &set(&remove); + + // Check that order is preserved after removals + let mut iter = map.keys(); + for &key in insert.iter().unique() { + if elements.contains(&key) { + assert_eq!(Some(&key), iter.next()); + } + } + + map.len() == elements.len() && map.iter().count() == elements.len() && + elements.iter().all(|k| map.get(k).is_some()) + } + + fn indexing(insert: Vec) -> bool { + let mut map: IndexMap<_, _> = insert.into_iter().map(|x| (x, x)).collect(); + let set: IndexSet<_> = map.keys().copied().collect(); + assert_eq!(map.len(), set.len()); + + for (i, &key) in set.iter().enumerate() { + assert_eq!(map.get_index(i), Some((&key, &key))); + assert_eq!(set.get_index(i), Some(&key)); + assert_eq!(map[i], key); + assert_eq!(set[i], key); + + *map.get_index_mut(i).unwrap().1 >>= 1; + map[i] <<= 1; + } + + set.iter().enumerate().all(|(i, &key)| { + let value = key & !1; + map[&key] == value && map[i] == value + }) + } + + // Use `u8` test indices so quickcheck is less likely to go out of bounds. + fn set_swap_indices(vec: Vec, a: u8, b: u8) -> TestResult { + let mut set = IndexSet::::from_iter(vec); + let a = usize::from(a); + let b = usize::from(b); + + if a >= set.len() || b >= set.len() { + return TestResult::discard(); + } + + let mut vec = Vec::from_iter(set.iter().cloned()); + vec.swap(a, b); + + set.swap_indices(a, b); + + // Check both iteration order and hash lookups + assert!(set.iter().eq(vec.iter())); + assert!(vec.iter().enumerate().all(|(i, x)| { + set.get_index_of(x) == Some(i) + })); + TestResult::passed() + } + + fn map_swap_indices(vec: Vec, from: u8, to: u8) -> TestResult { + test_map_swap_indices(vec, from, to, IndexMap::swap_indices) + } + + fn occupied_entry_swap_indices(vec: Vec, from: u8, to: u8) -> TestResult { + test_map_swap_indices(vec, from, to, |map, from, to| { + let key = map.keys()[from]; + match map.entry(key) { + Entry::Occupied(entry) => entry.swap_indices(to), + _ => unreachable!(), + } + }) + } + + fn indexed_entry_swap_indices(vec: Vec, from: u8, to: u8) -> TestResult { + test_map_swap_indices(vec, from, to, |map, from, to| { + map.get_index_entry(from).unwrap().swap_indices(to); + }) + } + + fn raw_occupied_entry_swap_indices(vec: Vec, from: u8, to: u8) -> TestResult { + use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut}; + test_map_swap_indices(vec, from, to, |map, from, to| { + let key = map.keys()[from]; + match map.raw_entry_mut_v1().from_key(&key) { + RawEntryMut::Occupied(entry) => entry.swap_indices(to), + _ => unreachable!(), + } + }) + } + + // Use `u8` test indices so quickcheck is less likely to go out of bounds. 
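A sketch of `swap_indices` and `move_index`, the operations exercised by the index-manipulation properties above:

use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<char> = ['a', 'b', 'c', 'd'].into_iter().collect();

    // Swap two positions; hashed lookups still find every value.
    set.swap_indices(0, 3);
    assert!(set.iter().eq(['d', 'b', 'c', 'a'].iter()));
    assert_eq!(set.get_index_of(&'a'), Some(3));

    // Move one value to a new position, shifting the values in between.
    set.move_index(1, 3);
    assert!(set.iter().eq(['d', 'c', 'a', 'b'].iter()));
}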
+ fn set_move_index(vec: Vec, from: u8, to: u8) -> TestResult { + let mut set = IndexSet::::from_iter(vec); + let from = usize::from(from); + let to = usize::from(to); + + if from >= set.len() || to >= set.len() { + return TestResult::discard(); + } + + let mut vec = Vec::from_iter(set.iter().cloned()); + let x = vec.remove(from); + vec.insert(to, x); + + set.move_index(from, to); + + // Check both iteration order and hash lookups + assert!(set.iter().eq(vec.iter())); + assert!(vec.iter().enumerate().all(|(i, x)| { + set.get_index_of(x) == Some(i) + })); + TestResult::passed() + } + + fn map_move_index(vec: Vec, from: u8, to: u8) -> TestResult { + test_map_move_index(vec, from, to, IndexMap::move_index) + } + + fn occupied_entry_move_index(vec: Vec, from: u8, to: u8) -> TestResult { + test_map_move_index(vec, from, to, |map, from, to| { + let key = map.keys()[from]; + match map.entry(key) { + Entry::Occupied(entry) => entry.move_index(to), + _ => unreachable!(), + } + }) + } + + fn indexed_entry_move_index(vec: Vec, from: u8, to: u8) -> TestResult { + test_map_move_index(vec, from, to, |map, from, to| { + map.get_index_entry(from).unwrap().move_index(to); + }) + } + + fn raw_occupied_entry_move_index(vec: Vec, from: u8, to: u8) -> TestResult { + use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut}; + test_map_move_index(vec, from, to, |map, from, to| { + let key = map.keys()[from]; + match map.raw_entry_mut_v1().from_key(&key) { + RawEntryMut::Occupied(entry) => entry.move_index(to), + _ => unreachable!(), + } + }) + } + + fn occupied_entry_shift_insert(vec: Vec, i: u8) -> TestResult { + test_map_shift_insert(vec, i, |map, i, key| { + match map.entry(key) { + Entry::Vacant(entry) => entry.shift_insert(i, ()), + _ => unreachable!(), + }; + }) + } + + fn raw_occupied_entry_shift_insert(vec: Vec, i: u8) -> TestResult { + use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut}; + test_map_shift_insert(vec, i, |map, i, key| { + match map.raw_entry_mut_v1().from_key(&key) { + RawEntryMut::Vacant(entry) => entry.shift_insert(i, key, ()), + _ => unreachable!(), + }; + }) + } +} + +fn test_map_swap_indices(vec: Vec, a: u8, b: u8, swap_indices: F) -> TestResult +where + F: FnOnce(&mut IndexMap, usize, usize), +{ + let mut map = IndexMap::::from_iter(vec.into_iter().map(|k| (k, ()))); + let a = usize::from(a); + let b = usize::from(b); + + if a >= map.len() || b >= map.len() { + return TestResult::discard(); + } + + let mut vec = Vec::from_iter(map.keys().copied()); + vec.swap(a, b); + + swap_indices(&mut map, a, b); + + // Check both iteration order and hash lookups + assert!(map.keys().eq(vec.iter())); + assert!(vec + .iter() + .enumerate() + .all(|(i, x)| { map.get_index_of(x) == Some(i) })); + TestResult::passed() +} + +fn test_map_move_index(vec: Vec, from: u8, to: u8, move_index: F) -> TestResult +where + F: FnOnce(&mut IndexMap, usize, usize), +{ + let mut map = IndexMap::::from_iter(vec.into_iter().map(|k| (k, ()))); + let from = usize::from(from); + let to = usize::from(to); + + if from >= map.len() || to >= map.len() { + return TestResult::discard(); + } + + let mut vec = Vec::from_iter(map.keys().copied()); + let x = vec.remove(from); + vec.insert(to, x); + + move_index(&mut map, from, to); + + // Check both iteration order and hash lookups + assert!(map.keys().eq(vec.iter())); + assert!(vec + .iter() + .enumerate() + .all(|(i, x)| { map.get_index_of(x) == Some(i) })); + TestResult::passed() +} + +fn test_map_shift_insert(vec: Vec, i: u8, shift_insert: F) -> TestResult 
+where + F: FnOnce(&mut IndexMap, usize, u8), +{ + let mut map = IndexMap::::from_iter(vec.into_iter().map(|k| (k, ()))); + let i = usize::from(i); + if i >= map.len() { + return TestResult::discard(); + } + + let mut vec = Vec::from_iter(map.keys().copied()); + let x = vec.pop().unwrap(); + vec.insert(i, x); + + let (last, ()) = map.pop().unwrap(); + assert_eq!(x, last); + map.shrink_to_fit(); // so we might have to grow and rehash the table + + shift_insert(&mut map, i, last); + + // Check both iteration order and hash lookups + assert!(map.keys().eq(vec.iter())); + assert!(vec + .iter() + .enumerate() + .all(|(i, x)| { map.get_index_of(x) == Some(i) })); + TestResult::passed() +} + +use crate::Op::*; +#[derive(Copy, Clone, Debug)] +enum Op { + Add(K, V), + Remove(K), + AddEntry(K, V), + RemoveEntry(K), +} + +impl Arbitrary for Op +where + K: Arbitrary, + V: Arbitrary, +{ + fn arbitrary(g: &mut Gen) -> Self { + match u32::arbitrary(g) % 4 { + 0 => Add(K::arbitrary(g), V::arbitrary(g)), + 1 => AddEntry(K::arbitrary(g), V::arbitrary(g)), + 2 => Remove(K::arbitrary(g)), + _ => RemoveEntry(K::arbitrary(g)), + } + } +} + +fn do_ops(ops: &[Op], a: &mut IndexMap, b: &mut HashMap) +where + K: Hash + Eq + Clone, + V: Clone, + S: BuildHasher, +{ + for op in ops { + match *op { + Add(ref k, ref v) => { + a.insert(k.clone(), v.clone()); + b.insert(k.clone(), v.clone()); + } + AddEntry(ref k, ref v) => { + a.entry(k.clone()).or_insert_with(|| v.clone()); + b.entry(k.clone()).or_insert_with(|| v.clone()); + } + Remove(ref k) => { + a.swap_remove(k); + b.remove(k); + } + RemoveEntry(ref k) => { + if let Entry::Occupied(ent) = a.entry(k.clone()) { + ent.swap_remove_entry(); + } + if let StdEntry::Occupied(ent) = b.entry(k.clone()) { + ent.remove_entry(); + } + } + } + //println!("{:?}", a); + } +} + +fn assert_maps_equivalent(a: &IndexMap, b: &HashMap) -> bool +where + K: Hash + Eq + Debug, + V: Eq + Debug, +{ + assert_eq!(a.len(), b.len()); + assert_eq!(a.iter().next().is_some(), b.iter().next().is_some()); + for key in a.keys() { + assert!(b.contains_key(key), "b does not contain {:?}", key); + } + for key in b.keys() { + assert!(a.get(key).is_some(), "a does not contain {:?}", key); + } + for key in a.keys() { + assert_eq!(a[key], b[key]); + } + true +} + +quickcheck_limit! 
{ + fn operations_i8(ops: Large>>) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops, &mut map, &mut reference); + assert_maps_equivalent(&map, &reference) + } + + fn operations_string(ops: Vec>) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops, &mut map, &mut reference); + assert_maps_equivalent(&map, &reference) + } + + fn keys_values(ops: Large>>) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops, &mut map, &mut reference); + let mut visit = IndexMap::new(); + for (k, v) in map.keys().zip(map.values()) { + assert_eq!(&map[k], v); + assert!(!visit.contains_key(k)); + visit.insert(*k, *v); + } + assert_eq!(visit.len(), reference.len()); + true + } + + fn keys_values_mut(ops: Large>>) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops, &mut map, &mut reference); + let mut visit = IndexMap::new(); + let keys = Vec::from_iter(map.keys().copied()); + for (k, v) in keys.iter().zip(map.values_mut()) { + assert_eq!(&reference[k], v); + assert!(!visit.contains_key(k)); + visit.insert(*k, *v); + } + assert_eq!(visit.len(), reference.len()); + true + } + + fn equality(ops1: Vec>, removes: Vec) -> bool { + let mut map = IndexMap::new(); + let mut reference = HashMap::new(); + do_ops(&ops1, &mut map, &mut reference); + let mut ops2 = ops1.clone(); + for &r in &removes { + if !ops2.is_empty() { + let i = r % ops2.len(); + ops2.remove(i); + } + } + let mut map2 = IndexMapFnv::default(); + let mut reference2 = HashMap::new(); + do_ops(&ops2, &mut map2, &mut reference2); + assert_eq!(map == map2, reference == reference2); + true + } + + fn retain_ordered(keys: Large>, remove: Large>) -> () { + let mut map = indexmap(keys.iter()); + let initial_map = map.clone(); // deduplicated in-order input + let remove_map = indexmap(remove.iter()); + let keys_s = set(keys.iter()); + let remove_s = set(remove.iter()); + let answer = &keys_s - &remove_s; + map.retain(|k, _| !remove_map.contains_key(k)); + + // check the values + assert_eq!(map.len(), answer.len()); + for key in &answer { + assert!(map.contains_key(key)); + } + // check the order + itertools::assert_equal(map.keys(), initial_map.keys().filter(|&k| !remove_map.contains_key(k))); + } + + fn sort_1(keyvals: Large>) -> () { + let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); + let mut answer = keyvals.0; + answer.sort_by_key(|t| t.0); + + // reverse dedup: Because IndexMap::from_iter keeps the last value for + // identical keys + answer.reverse(); + answer.dedup_by_key(|t| t.0); + answer.reverse(); + + map.sort_by(|k1, _, k2, _| Ord::cmp(k1, k2)); + + // check it contains all the values it should + for &(key, val) in &answer { + assert_eq!(map[&key], val); + } + + // check the order + + let mapv = Vec::from_iter(map); + assert_eq!(answer, mapv); + + } + + fn sort_2(keyvals: Large>) -> () { + let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); + map.sort_by(|_, v1, _, v2| Ord::cmp(v1, v2)); + assert_sorted_by_key(map, |t| t.1); + } + + fn sort_3(keyvals: Large>) -> () { + let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); + map.sort_by_cached_key(|&k, _| std::cmp::Reverse(k)); + assert_sorted_by_key(map, |t| std::cmp::Reverse(t.0)); + } + + fn reverse(keyvals: Large>) -> () { + let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); + + fn generate_answer(input: &Vec<(i8, i8)>) -> Vec<(i8, i8)> { + // to mimic what 
`IndexMap::from_iter` does: + // need to get (A) the unique keys in forward order, and (B) the + // last value of each of those keys. + + // create (A): an iterable that yields the unique keys in ltr order + let mut seen_keys = HashSet::new(); + let unique_keys_forward = input.iter().filter_map(move |(k, _)| { + if seen_keys.contains(k) { None } + else { seen_keys.insert(*k); Some(*k) } + }); + + // create (B): a mapping of keys to the last value seen for that key + // this is the same as reversing the input and taking the first + // value seen for that key! + let mut last_val_per_key = HashMap::new(); + for &(k, v) in input.iter().rev() { + if !last_val_per_key.contains_key(&k) { + last_val_per_key.insert(k, v); + } + } + + // iterate over the keys in (A) in order, and match each one with + // the corresponding last value from (B) + let mut ans: Vec<_> = unique_keys_forward + .map(|k| (k, *last_val_per_key.get(&k).unwrap())) + .collect(); + + // finally, since this test is testing `.reverse()`, reverse the + // answer in-place + ans.reverse(); + + ans + } + + let answer = generate_answer(&keyvals.0); + + // perform the work + map.reverse(); + + // check it contains all the values it should + for &(key, val) in &answer { + assert_eq!(map[&key], val); + } + + // check the order + let mapv = Vec::from_iter(map); + assert_eq!(answer, mapv); + } +} + +fn assert_sorted_by_key(iterable: I, key: Key) +where + I: IntoIterator, + I::Item: Ord + Clone + Debug, + Key: Fn(&I::Item) -> X, + X: Ord, +{ + let input = Vec::from_iter(iterable); + let mut sorted = input.clone(); + sorted.sort_by_key(key); + assert_eq!(input, sorted); +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +struct Alpha(String); + +impl Deref for Alpha { + type Target = String; + fn deref(&self) -> &String { + &self.0 + } +} + +const ALPHABET: &[u8] = b"abcdefghijklmnopqrstuvwxyz"; + +impl Arbitrary for Alpha { + fn arbitrary(g: &mut Gen) -> Self { + let len = usize::arbitrary(g) % g.size(); + let len = min(len, 16); + Alpha( + (0..len) + .map(|_| ALPHABET[usize::arbitrary(g) % ALPHABET.len()] as char) + .collect(), + ) + } + + fn shrink(&self) -> Box> { + Box::new((**self).shrink().map(Alpha)) + } +} + +/// quickcheck Arbitrary adaptor -- make a larger vec +#[derive(Clone, Debug)] +struct Large(T); + +impl Deref for Large { + type Target = T; + fn deref(&self) -> &T { + &self.0 + } +} + +impl Arbitrary for Large> +where + T: Arbitrary, +{ + fn arbitrary(g: &mut Gen) -> Self { + let len = usize::arbitrary(g) % (g.size() * 10); + Large((0..len).map(|_| T::arbitrary(g)).collect()) + } + + fn shrink(&self) -> Box> { + Box::new((**self).shrink().map(Large)) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/tests.rs b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/tests.rs new file mode 100644 index 000000000000..7d522f1c9708 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/indexmap-2.6.0/tests/tests.rs @@ -0,0 +1,28 @@ +use indexmap::{indexmap, indexset}; + +#[test] +fn test_sort() { + let m = indexmap! { + 1 => 2, + 7 => 1, + 2 => 2, + 3 => 3, + }; + + itertools::assert_equal( + m.sorted_by(|_k1, v1, _k2, v2| v1.cmp(v2)), + vec![(7, 1), (1, 2), (2, 2), (3, 3)], + ); +} + +#[test] +fn test_sort_set() { + let s = indexset! 
{ + 1, + 7, + 2, + 3, + }; + + itertools::assert_equal(s.sorted_by(|v1, v2| v1.cmp(v2)), vec![1, 2, 3, 7]); +} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..2c428e6eb9cd --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "a094e09534a243a4f39eaacf2a7b59e5e9368d30" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.github/workflows/ci.yml b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.github/workflows/ci.yml new file mode 100644 index 000000000000..0390cabe648e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.github/workflows/ci.yml @@ -0,0 +1,92 @@ +name: Continuous integration + +on: [push, pull_request] + +jobs: + build: + name: Build + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, macos-latest, windows-latest] + toolchain: [stable] + runs-on: ${{ matrix.platform }} + + steps: + - name: Checkout sources + uses: actions/checkout@v4 + + - name: Cache dependencies & build outputs + uses: actions/cache@v4 + with: + path: ~/.cargo + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.toolchain }} + components: rustfmt, clippy + + - name: Check code format + run: cargo fmt --all -- --check + + - name: Code lint with default features + run: cargo clippy --all-targets -- -D warnings + + - name: Code lint without default features + run: cargo clippy --no-default-features --workspace -- -D warnings + + - name: Code lint with all features + run: cargo clippy --no-default-features --workspace --all-features -- -D warnings + + - name: Test with default features + run: cargo test --all-features + + - name: Test with without default features + run: cargo test --no-default-features + + - name: Test with all features + run: cargo test --all-features + + build-no-std: + name: Build no_std + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + with: + targets: thumbv6m-none-eabi + + - name: Build + run: cargo build --no-default-features --target thumbv6m-none-eabi + + build-no-std-serde: + name: Build no_std, but with `serde` feature enabled + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Build + # `thumbv6m-none-eabi` can't be used as Serde doesn't compile there. 
+ run: cargo build --no-default-features --features serde + + check-lockfile: + name: Make sure the lockfile is up-to-date + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Error if checked-in lockfile is not up-to-date + run: cargo build --locked diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.gitignore b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.gitignore new file mode 100644 index 000000000000..ea8c4bf7f35f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/.gitignore @@ -0,0 +1 @@ +/target diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/Cargo.toml new file mode 100644 index 000000000000..24084bbd020c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/Cargo.toml @@ -0,0 +1,80 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +name = "ipld-core" +version = "0.4.1" +authors = ["Volker Mische "] +description = "IPLD core types" +readme = "README.md" +categories = [ + "data-structures", + "encoding", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/ipld/rust-ipld-core" + +[dependencies.cid] +version = "0.11.1" +features = ["alloc"] +default-features = false + +[dependencies.quickcheck] +version = "1.0" +optional = true + +[dependencies.serde] +version = "1.0.195" +features = ["alloc"] +optional = true +default-features = false + +[dependencies.serde_bytes] +version = "0.11.5" +optional = true +default-features = false + +[dev-dependencies.serde_derive] +version = "1.0.197" + +[dev-dependencies.serde_ipld_dagcbor] +version = "0.6.0" + +[dev-dependencies.serde_ipld_dagjson] +version = "0.2.0" + +[dev-dependencies.serde_json] +version = "1.0.79" + +[dev-dependencies.serde_test] +version = "1.0.132" + +[features] +arb = [ + "dep:quickcheck", + "cid/arb", +] +codec = [] +default = [ + "codec", + "std", +] +serde = [ + "dep:serde", + "dep:serde_bytes", + "cid/serde", +] +std = [ + "cid/std", + "serde?/std", + "serde_bytes?/std", +] diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/Cargo.toml.orig new file mode 100644 index 000000000000..8d7801652940 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/Cargo.toml.orig @@ -0,0 +1,45 @@ +[package] +name = "ipld-core" +version = "0.4.1" +authors = [ + "Volker Mische " +] +repository = "https://github.com/ipld/rust-ipld-core" +edition = "2021" +description = "IPLD core types" +license = "MIT OR Apache-2.0" +categories = ["data-structures", "encoding"] + +[features] +default = ["codec", "std"] +# Makes the error implement `std::error::Error` and the `Codec` trait available. +std = ["cid/std", "serde?/std", "serde_bytes?/std"] +# Enables support for Serde serialization into/deserialization from the `Ipld` enum. 
+serde = ["dep:serde", "dep:serde_bytes", "cid/serde"] +# Enables support for property based testing. +arb = ["dep:quickcheck", "cid/arb"] +# Enables support for the Codec trait, needs at least Rust 1.75 +codec = [] + +[dependencies] +cid = { version = "0.11.1", default-features = false, features = ["alloc"] } +quickcheck = { version = "1.0", optional = true } +serde = { version = "1.0.195", default-features = false, features = ["alloc"], optional = true } +serde_bytes = { version = "0.11.5", default-features = false, optional = true } + +[dev-dependencies] +serde_derive = "1.0.197" +serde_ipld_dagcbor = "0.6.0" +serde_ipld_dagjson = "0.2.0" +serde_json = "1.0.79" +serde_test = "1.0.132" + +# This is a hack in order to make the rustdoc tests and releases happy. +# We include README in the library docs, this way the are run as tests. Those examples create a +# circular dependency on `ipld-core` (as `serde_ipld_dagcbor` and `serde_ipld_dagjson` depend on +# `ipld-core`. +# Also without this change `cargo release` would complain, as the `Cargo.lock` needs modifications +# as the previously mentioned crates would need to depend on an already released and not the about +# to be released version of `ipld-core`. +[patch.crates-io] +ipld-core = { path = "." } diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/LICENSE-APACHE similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/toml-0.5.9/LICENSE-APACHE rename to third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/LICENSE-APACHE diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/LICENSE-MIT similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/LICENSE-MIT rename to third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/LICENSE-MIT diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/README.md b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/README.md new file mode 100644 index 000000000000..eeaea0c953d4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/README.md @@ -0,0 +1,108 @@ +IPLD core +========= + +[![Crates.io](https://img.shields.io/crates/v/ipld-core.svg)](https://crates.io/crates/ipld-core) +[![Documentation](https://docs.rs/ipld-core/badge.svg)](https://docs.rs/ipld-core) + +This crate provides core types for interoperating with [IPLD]. Codecs are not part of this crate, they are independent, but rely on `ipld-core`. + +The code is based on [libipld-core]. The major difference is that [Serde] is used a lot more for better interoperability with the rest of the Rust ecosystem. + + +Usage +----- + +### Codec independent code + +One of the main features of IPLD is that your Codec is independent of the data you are encoding. Hence it's common that you want to have your code to be independent of a specific code, but rather be generic. 
+
+Here's a full example of a function that can encode data with both [serde_ipld_dagcbor] or [serde_ipld_dagjson]:
+
+```rust
+use std::str;
+
+use ipld_core::codec::Codec;
+use serde::{Deserialize, Serialize};
+use serde_ipld_dagcbor::codec::DagCborCodec;
+use serde_ipld_dagjson::codec::DagJsonCodec;
+
+#[derive(Deserialize, Serialize)]
+struct Tree {
+    height: u8,
+    age: u8,
+}
+
+fn encode_generic<C, T>(value: &T) -> Result<Vec<u8>, C::Error>
+where
+    C: Codec<T>,
+{
+    C::encode_to_vec(value)
+}
+
+fn main() {
+    let tree = Tree {
+        height: 12,
+        age: 91,
+    };
+
+    let cbor_encoded = encode_generic::<DagCborCodec, _>(&tree);
+    #[allow(clippy::format_collect)]
+    let cbor_hex = cbor_encoded
+        .unwrap()
+        .iter()
+        .map(|byte| format!("{:02x}", byte))
+        .collect::<String>();
+    // CBOR encoded: https://cbor.nemo157.com/#value=a2666865696768740c63616765185b
+    println!("CBOR encoded: https://cbor.nemo157.com/#value={}", cbor_hex);
+    let json_encoded = encode_generic::<DagJsonCodec, _>(&tree).unwrap();
+    // JSON encoded: {"height":12,"age":91}
+    println!("JSON encoded: {}", str::from_utf8(&json_encoded).unwrap());
+}
+```
+
+### Extracting links
+
+If you are only interested in the links (CIDs) of an encoded IPLD object, then you can extract them directly with [`Codec::links()`]:
+
+```rust
+use ipld_core::{codec::{Codec, Links}, ipld, cid::Cid};
+use serde_ipld_dagjson::codec::DagJsonCodec;
+
+fn main() {
+    let cid = Cid::try_from("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap();
+    let data = ipld!({"some": {"nested": cid}, "or": [cid, cid], "more": true});
+
+    let mut encoded = Vec::new();
+    DagJsonCodec::encode(&mut encoded, &data).unwrap();
+
+    let links = DagJsonCodec::links(&encoded).unwrap().collect::<Vec<_>>();
+    // Extracted links: [Cid(bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy), Cid(bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy), Cid(bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy)]
+    println!("Extracted links: {:?}", links);
+}
+```
+
+
+Feature flags
+-------------
+
+ - `std` (enabled by default): Makes the error implement `std::error::Error` and the `Codec` trait available.
+ - `codec` (enabled by default): Provides the `Codec` trait, which enables encoding and decoding independent of the IPLD Codec. Disabling this feature significantly lowers the minimum supported Rust version (MSRV), to 1.64.
+ - `serde`: Enables support for Serde serialization into/deserialization from the `Ipld` enum.
+ - `arb`: Enables support for property based testing.
+
+
+License
+-------
+
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or <http://www.apache.org/licenses/LICENSE-2.0>)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
+
+at your option.
+
+[IPLD]: https://ipld.io/
+[libipld-core]: https://crates.io/crates/libipld-core
+[Serde]: https://serde.rs/
+[serde_ipld_dagcbor]: https://crates.io/crates/serde_ipld_dagcbor
+[serde_ipld_dagjson]: https://crates.io/crates/serde_ipld_dagjson
diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/arb.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/arb.rs
new file mode 100644
index 000000000000..17efdded2804
--- /dev/null
+++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/arb.rs
@@ -0,0 +1,66 @@
+//! Ipld representation.
+use alloc::{boxed::Box, string::String, vec::Vec}; + +use crate::{cid::Cid, ipld::Ipld}; +use quickcheck::empty_shrinker; +use quickcheck::Arbitrary; + +impl quickcheck::Arbitrary for Ipld { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + Self::arbitrary_ipld(g, &mut g.size()) + } + + fn shrink(&self) -> Box> { + match self { + Ipld::Null => empty_shrinker(), + Ipld::Bool(v) => Box::new(v.shrink().map(Ipld::Bool)), + Ipld::Integer(v) => Box::new(v.shrink().map(Ipld::Integer)), + Ipld::Float(v) => Box::new(v.shrink().map(Ipld::Float)), + Ipld::String(v) => Box::new(v.shrink().map(Ipld::String)), + Ipld::Bytes(v) => Box::new(v.shrink().map(Ipld::Bytes)), + Ipld::List(v) => Box::new(v.shrink().map(Ipld::List)), + Ipld::Map(v) => Box::new(v.shrink().map(Ipld::Map)), + Ipld::Link(v) => Box::new(v.shrink().map(Ipld::Link)), + } + } +} + +impl Ipld { + /// Special version on `arbitrary` to battle possible recursion + fn arbitrary_ipld(g: &mut quickcheck::Gen, size: &mut usize) -> Self { + if *size == 0 { + return Ipld::Null; + } + *size -= 1; + let index = usize::arbitrary(g) % 9; + match index { + 0 => Ipld::Null, + 1 => Ipld::Bool(bool::arbitrary(g)), + 2 => Ipld::Integer(i128::arbitrary(g)), + 3 => Ipld::Float(f64::arbitrary(g)), + 4 => Ipld::String(String::arbitrary(g)), + 5 => Ipld::Bytes(Vec::arbitrary(g)), + 6 => Ipld::List( + (0..Self::arbitrary_size(g, size)) + .map(|_| Self::arbitrary_ipld(g, size)) + .collect(), + ), + 7 => Ipld::Map( + (0..Self::arbitrary_size(g, size)) + .map(|_| (String::arbitrary(g), Self::arbitrary_ipld(g, size))) + .collect(), + ), + 8 => Ipld::Link(Cid::arbitrary(g)), + // unreachable due to the fact that + // we know that the index is always < 9 + _ => unreachable!(), + } + } + + fn arbitrary_size(g: &mut quickcheck::Gen, size: &mut usize) -> usize { + if *size == 0 { + return 0; + } + usize::arbitrary(g) % *size + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/codec.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/codec.rs new file mode 100644 index 000000000000..dbe85e8cf69c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/codec.rs @@ -0,0 +1,43 @@ +//! This module contains traits to have a unified API across codecs. +//! +//! There are two traits defined, [`Codec`] and [`Links`]. Those are separate traits as the `Links` +//! trait is not generic over a certain type. + +use cid::Cid; + +use std::io::{BufRead, Write}; + +/// Each IPLD codec implementation should implement this Codec trait. This way codecs can be more +/// easily exchanged or combined. +pub trait Codec: Links { + /// The multicodec code of the IPLD codec. + const CODE: u64; + /// The error that is returned if encoding or decoding fails. + type Error; + + /// Decode a reader into the desired type. + fn decode(reader: R) -> Result; + /// Encode a type into a writer. + fn encode(writer: W, data: &T) -> Result<(), Self::Error>; + + /// Decode a slice into the desired type. + fn decode_from_slice(bytes: &[u8]) -> Result { + Self::decode(bytes) + } + + /// Encode a type into bytes. + fn encode_to_vec(data: &T) -> Result, Self::Error> { + let mut output = Vec::new(); + Self::encode(&mut output, data)?; + Ok(output) + } +} + +/// Trait for returning the links of a serialized IPLD data. +pub trait Links { + /// The error that is returned if the link extraction fails. + type LinksError; + + /// Return all links (CIDs) that the given encoded data contains. 
+ fn links(bytes: &[u8]) -> Result, Self::LinksError>; +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/convert.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/convert.rs new file mode 100644 index 000000000000..5fc834ffd753 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/convert.rs @@ -0,0 +1,348 @@ +//! Conversion to and from ipld. +use alloc::{ + borrow::ToOwned, + boxed::Box, + collections::BTreeMap, + string::{String, ToString}, + vec::Vec, +}; +use core::{any::TypeId, fmt}; + +use crate::{ + cid::Cid, + ipld::{Ipld, IpldKind}, +}; + +/// Error used for converting from and into [`crate::ipld::Ipld`]. +#[derive(Clone, Debug)] +#[non_exhaustive] +pub enum ConversionError { + /// Error when the IPLD kind wasn't the one we expected. + WrongIpldKind { + /// The expected type. + expected: IpldKind, + /// The actual type. + found: IpldKind, + }, + /// Error when the given Ipld kind cannot be converted into a certain value type. + FromIpld { + /// The IPLD kind trying to convert from. + from: IpldKind, + /// The type trying to convert into. + into: TypeId, + }, +} + +impl fmt::Display for ConversionError { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::WrongIpldKind { expected, found } => { + write!( + formatter, + "kind error: expected {:?} but found {:?}", + expected, found + ) + } + Self::FromIpld { from, into } => { + write!( + formatter, + "conversion error: cannot convert {:?} into {:?}", + from, into + ) + } + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for ConversionError {} + +impl TryFrom for () { + type Error = ConversionError; + + fn try_from(ipld: Ipld) -> Result { + match ipld { + Ipld::Null => Ok(()), + _ => Err(ConversionError::WrongIpldKind { + expected: IpldKind::Null, + found: ipld.kind(), + }), + } + } +} + +macro_rules! derive_try_from_ipld_option { + ($enum:ident, $ty:ty) => { + impl TryFrom for Option<$ty> { + type Error = ConversionError; + + fn try_from(ipld: Ipld) -> Result { + match ipld { + Ipld::Null => Ok(None), + Ipld::$enum(value) => Ok(Some(value.try_into().map_err(|_| { + ConversionError::FromIpld { + from: IpldKind::$enum, + into: TypeId::of::<$ty>(), + } + })?)), + _ => Err(ConversionError::WrongIpldKind { + expected: IpldKind::$enum, + found: ipld.kind(), + }), + } + } + } + }; +} + +macro_rules! derive_try_from_ipld { + ($enum:ident, $ty:ty) => { + impl TryFrom for $ty { + type Error = ConversionError; + + fn try_from(ipld: Ipld) -> Result { + match ipld { + Ipld::$enum(value) => { + Ok(value.try_into().map_err(|_| ConversionError::FromIpld { + from: IpldKind::$enum, + into: TypeId::of::<$ty>(), + })?) + } + + _ => Err(ConversionError::WrongIpldKind { + expected: IpldKind::$enum, + found: ipld.kind(), + }), + } + } + } + }; +} + +macro_rules! derive_into_ipld_prim { + ($enum:ident, $ty:ty, $fn:ident) => { + impl From<$ty> for Ipld { + fn from(t: $ty) -> Self { + Ipld::$enum(t.$fn() as _) + } + } + }; +} + +macro_rules! 
derive_into_ipld { + ($enum:ident, $ty:ty, $($fn:ident),*) => { + impl From<$ty> for Ipld { + fn from(t: $ty) -> Self { + Ipld::$enum(t$(.$fn())*) + } + } + }; +} + +derive_into_ipld!(Bool, bool, clone); +derive_into_ipld_prim!(Integer, i8, clone); +derive_into_ipld_prim!(Integer, i16, clone); +derive_into_ipld_prim!(Integer, i32, clone); +derive_into_ipld_prim!(Integer, i64, clone); +derive_into_ipld_prim!(Integer, i128, clone); +derive_into_ipld_prim!(Integer, isize, clone); +derive_into_ipld_prim!(Integer, u8, clone); +derive_into_ipld_prim!(Integer, u16, clone); +derive_into_ipld_prim!(Integer, u32, clone); +derive_into_ipld_prim!(Integer, u64, clone); +derive_into_ipld_prim!(Integer, usize, clone); +derive_into_ipld_prim!(Float, f32, clone); +derive_into_ipld_prim!(Float, f64, clone); +derive_into_ipld!(String, String, into); +derive_into_ipld!(String, &str, to_string); +derive_into_ipld!(Bytes, Box<[u8]>, into_vec); +derive_into_ipld!(Bytes, Vec, into); +derive_into_ipld!(Bytes, &[u8], to_vec); +derive_into_ipld!(List, Vec, into); +derive_into_ipld!(Map, BTreeMap, to_owned); +derive_into_ipld!(Link, Cid, clone); +derive_into_ipld!(Link, &Cid, to_owned); + +derive_try_from_ipld!(Bool, bool); +derive_try_from_ipld!(Integer, i8); +derive_try_from_ipld!(Integer, i16); +derive_try_from_ipld!(Integer, i32); +derive_try_from_ipld!(Integer, i64); +derive_try_from_ipld!(Integer, i128); +derive_try_from_ipld!(Integer, isize); +derive_try_from_ipld!(Integer, u8); +derive_try_from_ipld!(Integer, u16); +derive_try_from_ipld!(Integer, u32); +derive_try_from_ipld!(Integer, u64); +derive_try_from_ipld!(Integer, u128); +derive_try_from_ipld!(Integer, usize); + +//derive_from_ipld!(Float, f32); // User explicit conversion is prefered. Would implicitly lossily convert from f64. + +derive_try_from_ipld!(Float, f64); +derive_try_from_ipld!(String, String); +derive_try_from_ipld!(Bytes, Vec); +derive_try_from_ipld!(List, Vec); +derive_try_from_ipld!(Map, BTreeMap); +derive_try_from_ipld!(Link, Cid); + +derive_try_from_ipld_option!(Bool, bool); +derive_try_from_ipld_option!(Integer, i8); +derive_try_from_ipld_option!(Integer, i16); +derive_try_from_ipld_option!(Integer, i32); +derive_try_from_ipld_option!(Integer, i64); +derive_try_from_ipld_option!(Integer, i128); +derive_try_from_ipld_option!(Integer, isize); +derive_try_from_ipld_option!(Integer, u8); +derive_try_from_ipld_option!(Integer, u16); +derive_try_from_ipld_option!(Integer, u32); +derive_try_from_ipld_option!(Integer, u64); +derive_try_from_ipld_option!(Integer, u128); +derive_try_from_ipld_option!(Integer, usize); + +//derive_from_ipld_option!(Float, f32); // User explicit conversion is prefered. Would implicitly lossily convert from f64. 
+ +derive_try_from_ipld_option!(Float, f64); +derive_try_from_ipld_option!(String, String); +derive_try_from_ipld_option!(Bytes, Vec); +derive_try_from_ipld_option!(List, Vec); +derive_try_from_ipld_option!(Map, BTreeMap); +derive_try_from_ipld_option!(Link, Cid); + +#[cfg(test)] +mod tests { + use alloc::{collections::BTreeMap, string::String, vec, vec::Vec}; + + use cid::Cid; + + use crate::ipld::Ipld; + + #[test] + #[should_panic] + fn try_into_wrong_type() { + let _boolean: bool = Ipld::Integer(u8::MAX as i128).try_into().unwrap(); + } + + #[test] + #[should_panic] + fn try_into_wrong_range() { + let int: u128 = Ipld::Integer(-1i128).try_into().unwrap(); + assert_eq!(int, u128::MIN); + } + + #[test] + fn try_into_bool() { + let boolean: bool = Ipld::Bool(true).try_into().unwrap(); + assert!(boolean); + + let boolean: Option = Ipld::Null.try_into().unwrap(); + assert_eq!(boolean, Option::None) + } + + #[test] + fn try_into_ints() { + let int: u8 = Ipld::Integer(u8::MAX as i128).try_into().unwrap(); + assert_eq!(int, u8::MAX); + + let int: u16 = Ipld::Integer(u16::MAX as i128).try_into().unwrap(); + assert_eq!(int, u16::MAX); + + let int: u32 = Ipld::Integer(u32::MAX as i128).try_into().unwrap(); + assert_eq!(int, u32::MAX); + + let int: u64 = Ipld::Integer(u64::MAX as i128).try_into().unwrap(); + assert_eq!(int, u64::MAX); + + let int: usize = Ipld::Integer(usize::MAX as i128).try_into().unwrap(); + assert_eq!(int, usize::MAX); + + let int: u128 = Ipld::Integer(i128::MAX).try_into().unwrap(); + assert_eq!(int, i128::MAX as u128); + + let int: i8 = Ipld::Integer(i8::MIN as i128).try_into().unwrap(); + assert_eq!(int, i8::MIN); + + let int: i16 = Ipld::Integer(i16::MIN as i128).try_into().unwrap(); + assert_eq!(int, i16::MIN); + + let int: i32 = Ipld::Integer(i32::MIN as i128).try_into().unwrap(); + assert_eq!(int, i32::MIN); + + let int: i64 = Ipld::Integer(i64::MIN as i128).try_into().unwrap(); + assert_eq!(int, i64::MIN); + + let int: isize = Ipld::Integer(isize::MIN as i128).try_into().unwrap(); + assert_eq!(int, isize::MIN); + + let int: i128 = Ipld::Integer(i128::MIN).try_into().unwrap(); + assert_eq!(int, i128::MIN); + + let int: Option = Ipld::Null.try_into().unwrap(); + assert_eq!(int, Option::None) + } + + #[test] + fn try_into_floats() { + /* let float: f32 = Ipld::Float(f32::MAX as f64).try_into().unwrap(); + assert_eq!(float, f32::MAX); */ + + let float: f64 = Ipld::Float(f64::MAX).try_into().unwrap(); + assert_eq!(float, f64::MAX); + + let float: Option = Ipld::Null.try_into().unwrap(); + assert_eq!(float, Option::None) + } + + #[test] + fn try_into_string() { + let lyrics: String = "I'm blue babedi babeda".into(); + let string: String = Ipld::String(lyrics.clone()).try_into().unwrap(); + assert_eq!(string, lyrics); + + let option: Option = Ipld::Null.try_into().unwrap(); + assert_eq!(option, Option::None) + } + + #[test] + fn try_into_vec() { + let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let bytes: Vec = Ipld::Bytes(data.clone()).try_into().unwrap(); + assert_eq!(bytes, data); + + let option: Option> = Ipld::Null.try_into().unwrap(); + assert_eq!(option, Option::None) + } + + #[test] + fn try_into_list() { + let ints = vec![Ipld::Integer(0), Ipld::Integer(1), Ipld::Integer(2)]; + let list: Vec = Ipld::List(ints.clone()).try_into().unwrap(); + assert_eq!(ints, list); + + let option: Option> = Ipld::Null.try_into().unwrap(); + assert_eq!(option, Option::None) + } + + #[test] + fn try_into_map() { + let mut numbs = BTreeMap::new(); + numbs.insert("zero".into(), 
Ipld::Integer(0)); + numbs.insert("one".into(), Ipld::Integer(1)); + numbs.insert("two".into(), Ipld::Integer(2)); + let map: BTreeMap = Ipld::Map(numbs.clone()).try_into().unwrap(); + assert_eq!(numbs, map); + + let option: Option> = Ipld::Null.try_into().unwrap(); + assert_eq!(option, Option::None) + } + + #[test] + fn try_into_cid() { + let cid = Cid::default(); + let link: Cid = Ipld::Link(cid).try_into().unwrap(); + assert_eq!(cid, link); + + let option: Option = Ipld::Null.try_into().unwrap(); + assert_eq!(option, Option::None) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/ipld.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/ipld.rs new file mode 100644 index 000000000000..ff0b9f71dd8b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/ipld.rs @@ -0,0 +1,399 @@ +//! Ipld representation. +use alloc::{ + borrow::ToOwned, + boxed::Box, + collections::BTreeMap, + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::fmt; + +use cid::Cid; + +/// Error when accessing IPLD List or Map elements. +#[derive(Clone, Debug)] +#[non_exhaustive] +pub enum IndexError { + /// Error when key cannot be parsed into an integer. + ParseInteger(String), + /// Error when the input wasn't an IPLD List or Map. + WrongKind(IpldKind), +} + +impl fmt::Display for IndexError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::ParseInteger(key) => write!(f, "cannot parse key into integer: {}", key), + Self::WrongKind(kind) => write!(f, "expected IPLD List or Map but found: {:?}", kind), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for IndexError {} + +/// Ipld +#[derive(Clone)] +pub enum Ipld { + /// Represents the absence of a value or the value undefined. + Null, + /// Represents a boolean value. + Bool(bool), + /// Represents an integer. + Integer(i128), + /// Represents a floating point value. + Float(f64), + /// Represents an UTF-8 string. + String(String), + /// Represents a sequence of bytes. + Bytes(Vec), + /// Represents a list. + List(Vec), + /// Represents a map of strings. + Map(BTreeMap), + /// Represents a map of integers. + Link(Cid), +} + +impl fmt::Debug for Ipld { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if f.alternate() { + match self { + Self::Null => write!(f, "Null"), + Self::Bool(b) => write!(f, "Bool({:?})", b), + Self::Integer(i) => write!(f, "Integer({:?})", i), + Self::Float(i) => write!(f, "Float({:?})", i), + Self::String(s) => write!(f, "String({:?})", s), + Self::Bytes(b) => write!(f, "Bytes({:?})", b), + Self::List(l) => write!(f, "List({:#?})", l), + Self::Map(m) => write!(f, "Map({:#?})", m), + Self::Link(cid) => write!(f, "Link({})", cid), + } + } else { + match self { + Self::Null => write!(f, "null"), + Self::Bool(b) => write!(f, "{:?}", b), + Self::Integer(i) => write!(f, "{:?}", i), + Self::Float(i) => write!(f, "{:?}", i), + Self::String(s) => write!(f, "{:?}", s), + Self::Bytes(b) => write!(f, "{:?}", b), + Self::List(l) => write!(f, "{:?}", l), + Self::Map(m) => write!(f, "{:?}", m), + Self::Link(cid) => write!(f, "{}", cid), + } + } + } +} + +/// NaN floats are forbidden in the IPLD Data Model, but we do not enforce it. So in case such a +/// value is introduced accidentally, make sure that it still compares as equal. This allows us +/// to implement `Eq` for `Ipld`. 
+impl PartialEq for Ipld { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::Null, Self::Null) => true, + (Self::Bool(self_value), Self::Bool(other_value)) => self_value == other_value, + (Self::Integer(self_value), Self::Integer(other_value)) => self_value == other_value, + (Self::Float(self_value), Self::Float(other_value)) => { + // Treat two NaNs as being equal. + self_value == other_value || self_value.is_nan() && other_value.is_nan() + } + (Self::String(self_value), Self::String(other_value)) => self_value == other_value, + (Self::Bytes(self_value), Self::Bytes(other_value)) => self_value == other_value, + (Self::List(self_value), Self::List(other_value)) => self_value == other_value, + (Self::Map(self_value), Self::Map(other_value)) => self_value == other_value, + (Self::Link(self_value), Self::Link(other_value)) => self_value == other_value, + _ => false, + } + } +} + +impl Eq for Ipld {} + +/// IPLD Kind information without the actual value. +/// +/// Sometimes it's useful to know the kind of an Ipld object without the actual value, e.g. for +/// error reporting. Those kinds can be a unity-only enum. +#[derive(Clone, Debug)] +pub enum IpldKind { + /// Null type. + Null, + /// Boolean type. + Bool, + /// Integer type. + Integer, + /// Float type. + Float, + /// String type. + String, + /// Bytes type. + Bytes, + /// List type. + List, + /// Map type. + Map, + /// Link type. + Link, +} + +/// An index into IPLD. +/// +/// It's used for accessing IPLD List and Map elements. +pub enum IpldIndex<'a> { + /// An index into an ipld list. + List(usize), + /// An owned index into an ipld map. + Map(String), + /// An index into an ipld map. + MapRef(&'a str), +} + +impl<'a> From for IpldIndex<'a> { + fn from(index: usize) -> Self { + Self::List(index) + } +} + +impl<'a> From for IpldIndex<'a> { + fn from(key: String) -> Self { + Self::Map(key) + } +} + +impl<'a> From<&'a str> for IpldIndex<'a> { + fn from(key: &'a str) -> Self { + Self::MapRef(key) + } +} + +impl<'a> TryFrom> for usize { + type Error = IndexError; + + fn try_from(index: IpldIndex<'a>) -> Result { + let parsed = match index { + IpldIndex::List(i) => i, + IpldIndex::Map(ref key) => key + .parse() + .map_err(|_| IndexError::ParseInteger(key.to_string()))?, + IpldIndex::MapRef(key) => key + .parse() + .map_err(|_| IndexError::ParseInteger(key.to_string()))?, + }; + Ok(parsed) + } +} + +impl<'a> From> for String { + fn from(index: IpldIndex<'a>) -> Self { + match index { + IpldIndex::Map(ref key) => key.to_string(), + IpldIndex::MapRef(key) => key.to_string(), + IpldIndex::List(i) => i.to_string(), + } + } +} + +impl Ipld { + /// Convert from an [`Ipld`] object into its kind without any associated values. + /// + /// This is intentionally not implemented via `From` to prevent accidental conversions by + /// making it more explicit. 
+ pub fn kind(&self) -> IpldKind { + match self { + Ipld::Null => IpldKind::Null, + Ipld::Bool(_) => IpldKind::Bool, + Ipld::Integer(_) => IpldKind::Integer, + Ipld::Float(_) => IpldKind::Float, + Ipld::String(_) => IpldKind::String, + Ipld::Bytes(_) => IpldKind::Bytes, + Ipld::List(_) => IpldKind::List, + Ipld::Map(_) => IpldKind::Map, + Ipld::Link(_) => IpldKind::Link, + } + } + + /// Destructs an ipld list or map + pub fn take<'a, T: Into>>( + mut self, + index: T, + ) -> Result, IndexError> { + let index = index.into(); + match &mut self { + Ipld::List(ref mut list) => { + let parsed_index = usize::try_from(index)?; + if parsed_index < list.len() { + Ok(Some(list.swap_remove(parsed_index))) + } else { + Ok(None) + } + } + Ipld::Map(ref mut map) => { + let key = String::from(index); + Ok(map.remove(&key)) + } + other => Err(IndexError::WrongKind(other.kind())), + } + } + + /// Indexes into an ipld list or map. + pub fn get<'a, T: Into>>(&self, index: T) -> Result, IndexError> { + let index = index.into(); + match self { + Ipld::List(list) => { + let parsed_index = usize::try_from(index)?; + Ok(list.get(parsed_index)) + } + Ipld::Map(map) => { + let key = String::from(index); + Ok(map.get(&key)) + } + other => Err(IndexError::WrongKind(other.kind())), + } + } + + /// Returns an iterator. + pub fn iter(&self) -> IpldIter<'_> { + IpldIter { + stack: vec![Box::new(vec![self].into_iter())], + } + } + + /// Returns the references to other blocks. + pub fn references>(&self, set: &mut E) { + for ipld in self.iter() { + if let Ipld::Link(cid) = ipld { + set.extend(core::iter::once(cid.to_owned())); + } + } + } +} + +/// Ipld iterator. +pub struct IpldIter<'a> { + stack: Vec + 'a>>, +} + +impl<'a> Iterator for IpldIter<'a> { + type Item = &'a Ipld; + + fn next(&mut self) -> Option { + loop { + if let Some(iter) = self.stack.last_mut() { + if let Some(ipld) = iter.next() { + match ipld { + Ipld::List(list) => { + self.stack.push(Box::new(list.iter())); + } + Ipld::Map(map) => { + self.stack.push(Box::new(map.values())); + } + _ => {} + } + return Some(ipld); + } else { + self.stack.pop(); + } + } else { + return None; + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ipld_bool_from() { + assert_eq!(Ipld::Bool(true), Ipld::from(true)); + assert_eq!(Ipld::Bool(false), Ipld::from(false)); + } + + #[test] + fn test_ipld_integer_from() { + assert_eq!(Ipld::Integer(1), Ipld::from(1i8)); + assert_eq!(Ipld::Integer(1), Ipld::from(1i16)); + assert_eq!(Ipld::Integer(1), Ipld::from(1i32)); + assert_eq!(Ipld::Integer(1), Ipld::from(1i64)); + assert_eq!(Ipld::Integer(1), Ipld::from(1i128)); + + //assert_eq!(Ipld::Integer(1), 1u8.to_ipld().to_owned()); + assert_eq!(Ipld::Integer(1), Ipld::from(1u16)); + assert_eq!(Ipld::Integer(1), Ipld::from(1u32)); + assert_eq!(Ipld::Integer(1), Ipld::from(1u64)); + } + + #[test] + fn test_ipld_float_from() { + assert_eq!(Ipld::Float(1.0), Ipld::from(1.0f32)); + assert_eq!(Ipld::Float(1.0), Ipld::from(1.0f64)); + } + + #[test] + fn test_ipld_string_from() { + assert_eq!(Ipld::String("a string".into()), Ipld::from("a string")); + assert_eq!( + Ipld::String("a string".into()), + Ipld::from("a string".to_string()) + ); + } + + #[test] + fn test_ipld_bytes_from() { + assert_eq!( + Ipld::Bytes(vec![0, 1, 2, 3]), + Ipld::from(&[0u8, 1u8, 2u8, 3u8][..]) + ); + assert_eq!( + Ipld::Bytes(vec![0, 1, 2, 3]), + Ipld::from(vec![0u8, 1u8, 2u8, 3u8]) + ); + } + + #[test] + fn test_ipld_link_from() { + let cid = + 
Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + assert_eq!(Ipld::Link(cid), Ipld::from(cid)); + } + + #[test] + fn test_take() { + let ipld = Ipld::List(vec![Ipld::Integer(0), Ipld::Integer(1), Ipld::Integer(2)]); + assert_eq!(ipld.clone().take(0).unwrap(), Some(Ipld::Integer(0))); + assert_eq!(ipld.clone().take(1).unwrap(), Some(Ipld::Integer(1))); + assert_eq!(ipld.take(2).unwrap(), Some(Ipld::Integer(2))); + + let mut map = BTreeMap::new(); + map.insert("a".to_string(), Ipld::Integer(0)); + map.insert("b".to_string(), Ipld::Integer(1)); + map.insert("c".to_string(), Ipld::Integer(2)); + let ipld = Ipld::Map(map); + assert_eq!(ipld.take("a").unwrap(), Some(Ipld::Integer(0))); + } + + #[test] + fn test_get() { + let ipld = Ipld::List(vec![Ipld::Integer(0), Ipld::Integer(1), Ipld::Integer(2)]); + assert_eq!(ipld.get(0).unwrap(), Some(&Ipld::Integer(0))); + assert_eq!(ipld.get(1).unwrap(), Some(&Ipld::Integer(1))); + assert_eq!(ipld.get(2).unwrap(), Some(&Ipld::Integer(2))); + + let mut map = BTreeMap::new(); + map.insert("a".to_string(), Ipld::Integer(0)); + map.insert("b".to_string(), Ipld::Integer(1)); + map.insert("c".to_string(), Ipld::Integer(2)); + let ipld = Ipld::Map(map); + assert_eq!(ipld.get("a").unwrap(), Some(&Ipld::Integer(0))); + } + + // NaN floats are forbidden in the IPLD Data Model, but still make sure they are treated as + // equal in case they accidentally end up there. + #[test] + fn test_partial_eq_nan() { + let invalid_ipld = Ipld::Float(f64::NAN); + assert_eq!(invalid_ipld, invalid_ipld); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/lib.rs new file mode 100644 index 000000000000..019025363202 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/lib.rs @@ -0,0 +1,27 @@ +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] +#![deny(warnings)] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +#[cfg(all(feature = "std", feature = "codec"))] +pub mod codec; +pub mod convert; +pub mod ipld; +#[cfg(feature = "serde")] +pub mod serde; + +#[cfg(feature = "arb")] +mod arb; +mod macros; + +pub use cid; + +// This is a hack to get those types working in the `ipld!` macro with and without `no_std`. The +// idea is from +// https://stackoverflow.com/questions/71675411/refer-to-an-extern-crate-in-macro-expansion/71675639#71675639 +#[doc(hidden)] +pub mod __private_do_not_use { + pub use alloc::{collections::BTreeMap, vec}; +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/macros.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/macros.rs new file mode 100644 index 000000000000..5c74db8a3b96 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/macros.rs @@ -0,0 +1,291 @@ +//! `ipld!` macro. +/// Construct an `Ipld` from a literal. +/// +/// ```edition2018 +/// # extern crate alloc; +/// # use ipld_core::ipld; +/// # +/// let value = ipld!({ +/// "code": 200, +/// "success": true, +/// "payload": { +/// "features": [ +/// "serde", +/// "json" +/// ] +/// } +/// }); +/// ``` +/// +/// Variables or expressions can be interpolated into the JSON literal. Any type +/// interpolated into an array element or object value must implement Serde's +/// `Serialize` trait, while any type interpolated into a object key must +/// implement `Into`. 
If the `Serialize` implementation of the +/// interpolated type decides to fail, or if the interpolated type contains a +/// map with non-string keys, the `json!` macro will panic. +/// +/// ```edition2018 +/// # extern crate alloc; +/// # use ipld_core::ipld; +/// # +/// let code = 200; +/// let features = vec!["serde", "json"]; +/// +/// let value = ipld!({ +/// "code": code, +/// "success": code == 200, +/// "payload": { +/// features[0]: features[1] +/// } +/// }); +/// ``` +/// +/// Trailing commas are allowed inside both arrays and objects. +/// +/// ```edition2018 +/// # extern crate alloc; +/// # use ipld_core::ipld; +/// # +/// let value = ipld!([ +/// "notice", +/// "the", +/// "trailing", +/// "comma -->", +/// ]); +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! ipld { + // Hide distracting implementation details from the generated rustdoc. + ($($ipld:tt)+) => { + ipld_internal!($($ipld)+) + }; +} + +#[macro_export(local_inner_macros)] +#[doc(hidden)] +macro_rules! ipld_internal { + ////////////////////////////////////////////////////////////////////////// + // TT muncher for parsing the inside of an array [...]. Produces a vec![...] + // of the elements. + // + // Must be invoked as: ipld_internal!(@array [] $($tt)*) + ////////////////////////////////////////////////////////////////////////// + + // Done with trailing comma. + (@array [$($elems:expr,)*]) => { + ipld_internal_vec![$($elems,)*] + }; + + // Done without trailing comma. + (@array [$($elems:expr),*]) => { + ipld_internal_vec![$($elems),*] + }; + + // Next element is `null`. + (@array [$($elems:expr,)*] null $($rest:tt)*) => { + ipld_internal!(@array [$($elems,)* ipld_internal!(null)] $($rest)*) + }; + + // Next element is `true`. + (@array [$($elems:expr,)*] true $($rest:tt)*) => { + ipld_internal!(@array [$($elems,)* ipld_internal!(true)] $($rest)*) + }; + + // Next element is `false`. + (@array [$($elems:expr,)*] false $($rest:tt)*) => { + ipld_internal!(@array [$($elems,)* ipld_internal!(false)] $($rest)*) + }; + + // Next element is an array. + (@array [$($elems:expr,)*] [$($array:tt)*] $($rest:tt)*) => { + ipld_internal!(@array [$($elems,)* ipld_internal!([$($array)*])] $($rest)*) + }; + + // Next element is a map. + (@array [$($elems:expr,)*] {$($map:tt)*} $($rest:tt)*) => { + ipld_internal!(@array [$($elems,)* ipld_internal!({$($map)*})] $($rest)*) + }; + + // Next element is an expression followed by comma. + (@array [$($elems:expr,)*] $next:expr, $($rest:tt)*) => { + ipld_internal!(@array [$($elems,)* ipld_internal!($next),] $($rest)*) + }; + + // Last element is an expression with no trailing comma. + (@array [$($elems:expr,)*] $last:expr) => { + ipld_internal!(@array [$($elems,)* ipld_internal!($last)]) + }; + + // Comma after the most recent element. + (@array [$($elems:expr),*] , $($rest:tt)*) => { + ipld_internal!(@array [$($elems,)*] $($rest)*) + }; + + // Unexpected token after most recent element. + (@array [$($elems:expr),*] $unexpected:tt $($rest:tt)*) => { + ipld_unexpected!($unexpected) + }; + + ////////////////////////////////////////////////////////////////////////// + // TT muncher for parsing the inside of an object {...}. Each entry is + // inserted into the given map variable. + // + // Must be invoked as: json_internal!(@object $map () ($($tt)*) ($($tt)*)) + // + // We require two copies of the input tokens so that we can match on one + // copy and trigger errors on the other copy. + ////////////////////////////////////////////////////////////////////////// + + // Done. 
+ (@object $object:ident () () ()) => {}; + + // Insert the current entry followed by trailing comma. + (@object $object:ident [$($key:tt)+] ($value:expr) , $($rest:tt)*) => { + let _ = $object.insert(($($key)+).into(), $value); + ipld_internal!(@object $object () ($($rest)*) ($($rest)*)); + }; + + // Current entry followed by unexpected token. + (@object $object:ident [$($key:tt)+] ($value:expr) $unexpected:tt $($rest:tt)*) => { + ipld_unexpected!($unexpected); + }; + + // Insert the last entry without trailing comma. + (@object $object:ident [$($key:tt)+] ($value:expr)) => { + let _ = $object.insert(($($key)+).into(), $value); + }; + + // Next value is `null`. + (@object $object:ident ($($key:tt)+) (: null $($rest:tt)*) $copy:tt) => { + ipld_internal!(@object $object [$($key)+] (ipld_internal!(null)) $($rest)*); + }; + + // Next value is `true`. + (@object $object:ident ($($key:tt)+) (: true $($rest:tt)*) $copy:tt) => { + ipld_internal!(@object $object [$($key)+] (ipld_internal!(true)) $($rest)*); + }; + + // Next value is `false`. + (@object $object:ident ($($key:tt)+) (: false $($rest:tt)*) $copy:tt) => { + ipld_internal!(@object $object [$($key)+] (ipld_internal!(false)) $($rest)*); + }; + + // Next value is an array. + (@object $object:ident ($($key:tt)+) (: [$($array:tt)*] $($rest:tt)*) $copy:tt) => { + ipld_internal!(@object $object [$($key)+] (ipld_internal!([$($array)*])) $($rest)*); + }; + + // Next value is a map. + (@object $object:ident ($($key:tt)+) (: {$($map:tt)*} $($rest:tt)*) $copy:tt) => { + ipld_internal!(@object $object [$($key)+] (ipld_internal!({$($map)*})) $($rest)*); + }; + + // Next value is an expression followed by comma. + (@object $object:ident ($($key:tt)+) (: $value:expr , $($rest:tt)*) $copy:tt) => { + ipld_internal!(@object $object [$($key)+] (ipld_internal!($value)) , $($rest)*); + }; + + // Last value is an expression with no trailing comma. + (@object $object:ident ($($key:tt)+) (: $value:expr) $copy:tt) => { + ipld_internal!(@object $object [$($key)+] (ipld_internal!($value))); + }; + + // Missing value for last entry. Trigger a reasonable error message. + (@object $object:ident ($($key:tt)+) (:) $copy:tt) => { + // "unexpected end of macro invocation" + ipld_internal!(); + }; + + // Missing colon and value for last entry. Trigger a reasonable error + // message. + (@object $object:ident ($($key:tt)+) () $copy:tt) => { + // "unexpected end of macro invocation" + ipld_internal!(); + }; + + // Misplaced colon. Trigger a reasonable error message. + (@object $object:ident () (: $($rest:tt)*) ($colon:tt $($copy:tt)*)) => { + // Takes no arguments so "no rules expected the token `:`". + ipld_unexpected!($colon); + }; + + // Found a comma inside a key. Trigger a reasonable error message. + (@object $object:ident ($($key:tt)*) (, $($rest:tt)*) ($comma:tt $($copy:tt)*)) => { + // Takes no arguments so "no rules expected the token `,`". + ipld_unexpected!($comma); + }; + + // Key is fully parenthesized. This avoids clippy double_parens false + // positives because the parenthesization may be necessary here. + (@object $object:ident () (($key:expr) : $($rest:tt)*) $copy:tt) => { + ipld_internal!(@object $object ($key) (: $($rest)*) (: $($rest)*)); + }; + + // Munch a token into the current key. + (@object $object:ident ($($key:tt)*) ($tt:tt $($rest:tt)*) $copy:tt) => { + ipld_internal!(@object $object ($($key)* $tt) ($($rest)*) ($($rest)*)); + }; + + ////////////////////////////////////////////////////////////////////////// + // The main implementation. 
+ // + // Must be invoked as: json_internal!($($json)+) + ////////////////////////////////////////////////////////////////////////// + + (null) => { + $crate::ipld::Ipld::Null + }; + + (true) => { + $crate::ipld::Ipld::Bool(true) + }; + + (false) => { + $crate::ipld::Ipld::Bool(false) + }; + + ([]) => { + $crate::ipld::Ipld::List(ipld_internal_vec![]) + }; + + ([ $($tt:tt)+ ]) => { + $crate::ipld::Ipld::List(ipld_internal!(@array [] $($tt)+)) + }; + + ({}) => { + $crate::ipld::Ipld::Map($crate::__private_do_not_use::BTreeMap::new()) + }; + + ({ $($tt:tt)+ }) => { + $crate::ipld::Ipld::Map({ + let mut object = $crate::__private_do_not_use::BTreeMap::new(); + ipld_internal!(@object object () ($($tt)+) ($($tt)+)); + object + }) + }; + + // Any Serialize type: numbers, strings, struct literals, variables etc. + // Must be below every other rule. + ($other:expr) => { + { + $crate::ipld::Ipld::from($other) + } + }; +} + +// The json_internal macro above cannot invoke vec directly because it uses +// local_inner_macros. A vec invocation there would resolve to $crate::vec. +// Instead invoke vec here outside of local_inner_macros. +#[macro_export] +#[doc(hidden)] +macro_rules! ipld_internal_vec { + ($($content:tt)*) => { + $crate::__private_do_not_use::vec![$($content)*] + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! ipld_unexpected { + () => {}; +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/de.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/de.rs new file mode 100644 index 000000000000..cd890a64bc77 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/de.rs @@ -0,0 +1,766 @@ +use alloc::{borrow::ToOwned, collections::BTreeMap, format, string::String, vec::Vec}; +use core::{convert::TryFrom, fmt}; + +use cid::serde::{BytesToCidVisitor, CID_SERDE_PRIVATE_IDENTIFIER}; +use cid::Cid; +use serde::{ + de::{self, IntoDeserializer}, + forward_to_deserialize_any, Deserialize, +}; + +use crate::{ipld::Ipld, serde::SerdeError}; + +/// Deserialize instances of [`crate::ipld::Ipld`]. +/// +/// # Example +/// +/// ``` +/// use std::collections::BTreeMap; +/// +/// use serde_derive::Deserialize; +/// use ipld_core::ipld::Ipld; +/// use ipld_core::serde::from_ipld; +/// +/// #[derive(Deserialize)] +/// struct Person { +/// name: String, +/// age: u8, +/// hobbies: Vec, +/// is_cool: bool, +/// } +/// +/// let ipld = Ipld::Map({ +/// BTreeMap::from([ +/// ("name".into(), Ipld::String("Hello World!".into())), +/// ("age".into(), Ipld::Integer(52)), +/// ( +/// "hobbies".into(), +/// Ipld::List(vec![ +/// Ipld::String("geography".into()), +/// Ipld::String("programming".into()), +/// ]), +/// ), +/// ("is_cool".into(), Ipld::Bool(true)), +/// ]) +/// }); +/// +/// let person = from_ipld(ipld); +/// assert!(matches!(person, Ok(Person { .. }))); +/// ``` +// NOTE vmx 2021-12-22: Taking by value is also what `serde_json` does. 
+pub fn from_ipld(value: Ipld) -> Result +where + T: serde::de::DeserializeOwned, +{ + T::deserialize(value) +} + +impl<'de> de::Deserialize<'de> for Ipld { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct IpldVisitor; + + impl<'de> de::Visitor<'de> for IpldVisitor { + type Value = Ipld; + + fn expecting(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.write_str("any valid IPLD kind") + } + + #[inline] + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + Ok(Ipld::String(String::from(value))) + } + + #[inline] + fn visit_bytes(self, v: &[u8]) -> Result + where + E: de::Error, + { + self.visit_byte_buf(v.to_owned()) + } + + #[inline] + fn visit_byte_buf(self, v: Vec) -> Result + where + E: de::Error, + { + Ok(Ipld::Bytes(v)) + } + + #[inline] + fn visit_u64(self, v: u64) -> Result + where + E: de::Error, + { + Ok(Ipld::Integer(v.into())) + } + + #[inline] + fn visit_i64(self, v: i64) -> Result + where + E: de::Error, + { + Ok(Ipld::Integer(v.into())) + } + + #[inline] + fn visit_i128(self, v: i128) -> Result + where + E: de::Error, + { + Ok(Ipld::Integer(v)) + } + + #[inline] + fn visit_f64(self, v: f64) -> Result + where + E: de::Error, + { + Ok(Ipld::Float(v)) + } + + #[inline] + fn visit_bool(self, v: bool) -> Result + where + E: de::Error, + { + Ok(Ipld::Bool(v)) + } + + #[inline] + fn visit_none(self) -> Result + where + E: de::Error, + { + Ok(Ipld::Null) + } + + #[inline] + fn visit_some(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + Deserialize::deserialize(deserializer) + } + + #[inline] + fn visit_unit(self) -> Result + where + E: de::Error, + { + Ok(Ipld::Null) + } + + #[inline] + fn visit_seq(self, mut visitor: V) -> Result + where + V: de::SeqAccess<'de>, + { + let mut vec = Vec::with_capacity(visitor.size_hint().unwrap_or(0)); + + while let Some(elem) = visitor.next_element()? { + vec.push(elem); + } + + Ok(Ipld::List(vec)) + } + + #[inline] + fn visit_map(self, mut visitor: V) -> Result + where + V: de::MapAccess<'de>, + { + let mut values = BTreeMap::new(); + + while let Some((key, value)) = visitor.next_entry()? { + let prev_value = values.insert(key, value); + if prev_value.is_some() { + return Err(de::Error::custom("Duplicate map key")); + } + } + + Ok(Ipld::Map(values)) + } + + /// Newtype structs are only used to deserialize CIDs. + #[inline] + fn visit_newtype_struct(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + deserializer + .deserialize_bytes(BytesToCidVisitor) + .map(Ipld::Link) + } + } + + deserializer.deserialize_any(IpldVisitor) + } +} + +macro_rules! impl_deserialize_integer { + ($ty:ident, $deserialize:ident, $visit:ident) => { + fn $deserialize>(self, visitor: V) -> Result { + match self { + Self::Integer(integer) => match $ty::try_from(integer) { + Ok(int) => visitor.$visit(int), + Err(_) => error(format!( + "`Ipld::Integer` value was bigger than `{}`", + stringify!($ty) + )), + }, + _ => error(format!( + "Only `Ipld::Integer` can be deserialized to `{}`, input was `{:#?}`", + stringify!($ty), + self + )), + } + } + }; +} + +/// A Deserializer for CIDs. +/// +/// A separate deserializer is needed to make sure we always deserialize only CIDs as `Ipld::Link` +/// and don't deserialize arbitrary bytes. 
+struct CidDeserializer(Cid); + +impl<'de> de::Deserializer<'de> for CidDeserializer { + type Error = SerdeError; + + #[inline] + fn deserialize_any>(self, _visitor: V) -> Result { + error("Only bytes can be deserialized into a CID") + } + + fn deserialize_bytes>(self, visitor: V) -> Result { + visitor.visit_bytes(&self.0.to_bytes()) + } + + forward_to_deserialize_any! { + bool byte_buf char enum f32 f64 i8 i16 i32 i64 identifier ignored_any map newtype_struct + option seq str string struct tuple tuple_struct u8 u16 u32 u64 unit unit_struct + } +} + +/// Deserialize from an [`Ipld`] enum into a Rust type. +/// +/// The deserialization will return an error if you try to deserialize into an integer type that +/// would be too small to hold the value stored in [`Ipld::Integer`]. +/// +/// [`Ipld::Floats`] can be converted to `f32` if there is no of precision, else it will error. +impl<'de> de::Deserializer<'de> for Ipld { + type Error = SerdeError; + + #[inline] + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + match self { + Self::Null => visitor.visit_none(), + Self::Bool(bool) => visitor.visit_bool(bool), + Self::Integer(i128) => visitor.visit_i128(i128), + Self::Float(f64) => visitor.visit_f64(f64), + Self::String(string) => visitor.visit_str(&string), + Self::Bytes(bytes) => visitor.visit_bytes(&bytes), + Self::List(list) => visit_seq(list, visitor), + Self::Map(map) => visit_map(map, visitor), + Self::Link(cid) => visitor.visit_newtype_struct(CidDeserializer(cid)), + } + } + + fn deserialize_unit>(self, visitor: V) -> Result { + match self { + Self::Null => visitor.visit_unit(), + _ => error(format!( + "Only `Ipld::Null` can be deserialized to unit, input was `{:#?}`", + self + )), + } + } + + fn deserialize_bool>(self, visitor: V) -> Result { + match self { + Self::Bool(bool) => visitor.visit_bool(bool), + _ => error(format!( + "Only `Ipld::Bool` can be deserialized to bool, input was `{:#?}`", + self + )), + } + } + + impl_deserialize_integer!(i8, deserialize_i8, visit_i8); + impl_deserialize_integer!(i16, deserialize_i16, visit_i16); + impl_deserialize_integer!(i32, deserialize_i32, visit_i32); + impl_deserialize_integer!(i64, deserialize_i64, visit_i64); + + impl_deserialize_integer!(u8, deserialize_u8, visit_u8); + impl_deserialize_integer!(u16, deserialize_u16, visit_u16); + impl_deserialize_integer!(u32, deserialize_u32, visit_u32); + impl_deserialize_integer!(u64, deserialize_u64, visit_u64); + + fn deserialize_f32>(self, visitor: V) -> Result { + match self { + Self::Float(float) => { + if !float.is_finite() { + error(format!("`Ipld::Float` must be a finite number, not infinity or NaN, input was `{}`", float)) + } else if (float as f32) as f64 != float { + error( + "`Ipld::Float` cannot be deserialized to `f32`, without loss of precision`", + ) + } else { + visitor.visit_f32(float as f32) + } + } + _ => error(format!( + "Only `Ipld::Float` can be deserialized to `f32`, input was `{:#?}`", + self + )), + } + } + + fn deserialize_f64>(self, visitor: V) -> Result { + match self { + Self::Float(float) => { + if float.is_finite() { + visitor.visit_f64(float) + } else { + error(format!("`Ipld::Float` must be a finite number, not infinity or NaN, input was `{}`", float)) + } + } + _ => error(format!( + "Only `Ipld::Float` can be deserialized to `f64`, input was `{:#?}`", + self + )), + } + } + + fn deserialize_char>(self, visitor: V) -> Result { + match self { + Self::String(string) => { + if string.chars().count() == 1 { + 
visitor.visit_char(string.chars().next().unwrap()) + } else { + error("`Ipld::String` was longer than a single character") + } + } + _ => error(format!( + "Only `Ipld::String` can be deserialized to string, input was `{:#?}`", + self + )), + } + } + + fn deserialize_str>(self, visitor: V) -> Result { + match self { + Self::String(string) => visitor.visit_str(&string), + _ => error(format!( + "Only `Ipld::String` can be deserialized to string, input was `{:#?}`", + self + )), + } + } + + fn deserialize_string>(self, visitor: V) -> Result { + match self { + Self::String(string) => visitor.visit_string(string), + _ => error(format!( + "Only `Ipld::String` can be deserialized to string, input was `{:#?}`", + self + )), + } + } + + fn deserialize_bytes>(self, visitor: V) -> Result { + match self { + Self::Bytes(bytes) => visitor.visit_bytes(&bytes), + _ => error(format!( + "Only `Ipld::Bytes` can be deserialized to bytes, input was `{:#?}`", + self + )), + } + } + + fn deserialize_byte_buf>( + self, + visitor: V, + ) -> Result { + match self { + Self::Bytes(bytes) => visitor.visit_byte_buf(bytes), + _ => error(format!( + "Only `Ipld::Bytes` can be deserialized to bytes, input was `{:#?}`", + self + )), + } + } + + fn deserialize_seq>(self, visitor: V) -> Result { + match self { + Self::List(list) => visit_seq(list, visitor), + _ => error(format!( + "Only `Ipld::List` can be deserialized to sequence, input was `{:#?}`", + self + )), + } + } + + fn deserialize_tuple>( + self, + len: usize, + visitor: V, + ) -> Result { + match self { + Self::List(list) => { + if len == list.len() { + visit_seq(list, visitor) + } else { + error(format!("The tuple size must match the length of the `Ipld::List`, tuple size: {}, `Ipld::List` length: {}", len, list.len())) + } + } + _ => error(format!( + "Only `Ipld::List` can be deserialized to tuple, input was `{:#?}`", + self + )), + } + } + + fn deserialize_tuple_struct>( + self, + _name: &str, + len: usize, + visitor: V, + ) -> Result { + self.deserialize_tuple(len, visitor) + } + + fn deserialize_map>(self, visitor: V) -> Result { + match self { + Self::Map(map) => visit_map(map, visitor), + _ => error(format!( + "Only `Ipld::Map` can be deserialized to map, input was `{:#?}`", + self + )), + } + } + + fn deserialize_identifier>( + self, + visitor: V, + ) -> Result { + match self { + Self::String(string) => visitor.visit_str(&string), + _ => error(format!( + "Only `Ipld::String` can be deserialized to identifier, input was `{:#?}`", + self + )), + } + } + + fn deserialize_struct>( + self, + _name: &str, + _fields: &[&str], + visitor: V, + ) -> Result { + match self { + Self::Map(map) => visit_map(map, visitor), + _ => error(format!( + "Only `Ipld::Map` can be deserialized to struct, input was `{:#?}`", + self + )), + } + } + + fn deserialize_unit_struct>( + self, + _name: &str, + _visitor: V, + ) -> Result { + error("Unit struct cannot be deserialized") + } + + fn deserialize_newtype_struct>( + self, + name: &str, + visitor: V, + ) -> Result { + if name == CID_SERDE_PRIVATE_IDENTIFIER { + match self { + Ipld::Link(cid) => visitor.visit_newtype_struct(CidDeserializer(cid)), + _ => error(format!( + "Only `Ipld::Link`s can be deserialized to CIDs, input was `{:#?}`", + self + )), + } + } else { + visitor.visit_newtype_struct(self) + } + } + + // Heavily based on + // https://github.com/serde-rs/json/blob/95f67a09399d546d9ecadeb747a845a77ff309b2/src/value/de.rs#L249 + fn deserialize_enum>( + self, + _name: &str, + _variants: &[&str], + visitor: V, + ) -> Result { + 
let (variant, value) = match self { + Ipld::Map(map) => { + let mut iter = map.into_iter(); + let (variant, value) = match iter.next() { + Some(v) => v, + None => { + return error( + "Only `Ipld::Map`s with a single key can be deserialized to `enum`, input had no keys" + ); + } + }; + // Enums are encoded in IPLD as maps with a single key-value pair + if iter.next().is_some() { + return error( + "Only `Ipld::Map`s with a single key can be deserialized to `enum`, input had more keys" + ); + } + (variant, Some(value)) + } + Ipld::String(variant) => (variant, None), + _ => return error(format!( + "Only `Ipld::Map` and `Ipld::String` can be deserialized to `enum`, input was `{:#?}`", + self + )), + }; + + visitor.visit_enum(EnumDeserializer { variant, value }) + } + + // Heavily based on + // https://github.com/serde-rs/json/blob/95f67a09399d546d9ecadeb747a845a77ff309b2/src/value/de.rs#L446 + fn deserialize_ignored_any>( + self, + visitor: V, + ) -> Result { + drop(self); + visitor.visit_unit() + } + + fn deserialize_option>(self, visitor: V) -> Result { + match self { + Self::Null => visitor.visit_none(), + _ => visitor.visit_some(self), + } + } + + fn is_human_readable(&self) -> bool { + false + } +} + +fn visit_map<'de, V>(map: BTreeMap, visitor: V) -> Result +where + V: de::Visitor<'de>, +{ + let mut deserializer = MapDeserializer::new(map); + visitor.visit_map(&mut deserializer) +} + +fn visit_seq<'de, V>(list: Vec, visitor: V) -> Result +where + V: de::Visitor<'de>, +{ + let mut deserializer = SeqDeserializer::new(list); + visitor.visit_seq(&mut deserializer) +} + +// Heavily based on +// https://github.com/serde-rs/json/blob/95f67a09399d546d9ecadeb747a845a77ff309b2/src/value/de.rs#L601 +struct MapDeserializer { + iter: as IntoIterator>::IntoIter, + value: Option, +} + +impl MapDeserializer { + fn new(map: BTreeMap) -> Self { + Self { + iter: map.into_iter(), + value: None, + } + } +} + +impl<'de> de::MapAccess<'de> for MapDeserializer { + type Error = SerdeError; + + fn next_key_seed(&mut self, seed: K) -> Result, Self::Error> + where + K: de::DeserializeSeed<'de>, + { + match self.iter.next() { + Some((key, value)) => { + self.value = Some(value); + seed.deserialize(Ipld::String(key)).map(Some) + } + None => Ok(None), + } + } + + fn next_value_seed(&mut self, seed: T) -> Result + where + T: de::DeserializeSeed<'de>, + { + match self.value.take() { + Some(value) => seed.deserialize(value), + None => error("value is missing"), + } + } + + fn size_hint(&self) -> Option { + match self.iter.size_hint() { + (lower, Some(upper)) if lower == upper => Some(upper), + _ => None, + } + } +} + +// Heavily based on +// https://github.com/serde-rs/json/blob/95f67a09399d546d9ecadeb747a845a77ff309b2/src/value/de.rs#L554 +struct SeqDeserializer { + iter: as IntoIterator>::IntoIter, +} + +impl SeqDeserializer { + fn new(vec: Vec) -> Self { + Self { + iter: vec.into_iter(), + } + } +} + +impl<'de> de::SeqAccess<'de> for SeqDeserializer { + type Error = SerdeError; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: de::DeserializeSeed<'de>, + { + match self.iter.next() { + Some(value) => seed.deserialize(value).map(Some), + None => Ok(None), + } + } + + fn size_hint(&self) -> Option { + match self.iter.size_hint() { + (lower, Some(upper)) if lower == upper => Some(upper), + _ => None, + } + } +} + +// Heavily based on +// https://github.com/serde-rs/json/blob/95f67a09399d546d9ecadeb747a845a77ff309b2/src/value/de.rs#L455 +struct EnumDeserializer { + variant: String, + value: 
Option, +} + +impl<'de> de::EnumAccess<'de> for EnumDeserializer { + type Error = SerdeError; + type Variant = VariantDeserializer; + + fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error> + where + V: de::DeserializeSeed<'de>, + { + let variant = self.variant.into_deserializer(); + let visitor = VariantDeserializer(self.value); + seed.deserialize(variant).map(|v| (v, visitor)) + } +} + +// Heavily based on +// https://github.com/serde-rs/json/blob/95f67a09399d546d9ecadeb747a845a77ff309b2/src/value/de.rs#L482 +struct VariantDeserializer(Option); + +impl<'de> de::VariantAccess<'de> for VariantDeserializer { + type Error = SerdeError; + + fn unit_variant(self) -> Result<(), Self::Error> { + match self.0 { + Some(value) => de::Deserialize::deserialize(value), + None => Ok(()), + } + } + + fn newtype_variant_seed(self, seed: T) -> Result + where + T: de::DeserializeSeed<'de>, + { + match self.0 { + Some(value) => seed.deserialize(value), + None => Err(de::Error::invalid_type( + de::Unexpected::UnitVariant, + &"newtype variant", + )), + } + } + + fn tuple_variant(self, len: usize, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + match self.0 { + Some(Ipld::List(list)) => { + if len == list.len() { + visit_seq(list, visitor) + } else { + error(format!("The tuple variant size must match the length of the `Ipld::List`, tuple variant size: {}, `Ipld::List` length: {}", len, list.len())) + } + } + Some(_) => error(format!( + "Only `Ipld::List` can be deserialized to tuple variant, input was `{:#?}`", + self.0 + )), + None => Err(de::Error::invalid_type( + de::Unexpected::UnitVariant, + &"tuple variant", + )), + } + } + + fn struct_variant( + self, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + match self.0 { + Some(Ipld::Map(v)) => visit_map(v, visitor), + Some(_) => error(format!( + "Only `Ipld::Map` can be deserialized to struct variant, input was `{:#?}`", + self.0 + )), + None => Err(de::Error::invalid_type( + de::Unexpected::UnitVariant, + &"struct variant", + )), + } + } +} + +/// Returns a general error. +fn error(message: S) -> Result +where + S: AsRef + fmt::Display, +{ + Err(de::Error::custom(message)) +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/extract_links.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/extract_links.rs new file mode 100644 index 000000000000..4fb760f26433 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/extract_links.rs @@ -0,0 +1,147 @@ +use alloc::{vec, vec::Vec}; +use core::fmt; + +use cid::CidGeneric; +use serde::{de, Deserialize}; +use serde_bytes::ByteBuf; + +/// Extract links from an `ipld_serde_dag*` codec. +#[derive(Debug)] +pub struct ExtractLinks { + links: Vec>, +} + +impl ExtractLinks { + /// Get the extracted links (CIDs). 
+ pub fn into_vec(self) -> Vec> { + self.links + } +} + +impl<'de, const S: usize> de::Visitor<'de> for ExtractLinks { + type Value = Vec>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("anything at all") + } + + #[inline] + fn visit_bool(self, _value: bool) -> Result { + Ok(Vec::new()) + } + + #[inline] + fn visit_i64(self, _value: i64) -> Result { + Ok(Vec::new()) + } + + #[inline] + fn visit_i128(self, _value: i128) -> Result { + Ok(Vec::new()) + } + + #[inline] + fn visit_u64(self, _value: u64) -> Result { + Ok(Vec::new()) + } + + #[inline] + fn visit_u128(self, _value: u128) -> Result { + Ok(Vec::new()) + } + + #[inline] + fn visit_f64(self, _value: f64) -> Result { + Ok(Vec::new()) + } + + #[inline] + fn visit_str(self, _value: &str) -> Result + where + E: de::Error, + { + Ok(Vec::new()) + } + + #[inline] + fn visit_none(self) -> Result { + Ok(Vec::new()) + } + + #[inline] + fn visit_some(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + Ok(Self::deserialize(deserializer)?.links) + } + + #[inline] + fn visit_newtype_struct(self, deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + // No DAG-* format has the idea of a newtyp struct. Therefore when visiting a newtype + // struct, we can be sure that it's from deserializing a CID. + let bytes = ByteBuf::deserialize(deserializer)?; + let cid = + CidGeneric::try_from(&bytes[..]).map_err(|_| de::Error::custom("Cannot decode CID"))?; + Ok(vec![cid]) + } + + #[inline] + fn visit_unit(self) -> Result { + Ok(Vec::new()) + } + + #[inline] + fn visit_seq(self, mut seq: A) -> Result + where + A: de::SeqAccess<'de>, + { + let mut links = Vec::new(); + while let Some(mut maybe_links) = seq.next_element::()? { + links.append(&mut maybe_links.links) + } + Ok(links) + } + + #[inline] + fn visit_map(self, mut map: A) -> Result + where + A: de::MapAccess<'de>, + { + let mut links = Vec::new(); + while let Some((_, mut maybe_links)) = map.next_entry::()? { + links.append(&mut maybe_links.links) + } + Ok(links) + } + + #[inline] + fn visit_bytes(self, _value: &[u8]) -> Result + where + E: de::Error, + { + Ok(Vec::new()) + } + + fn visit_enum(self, data: A) -> Result + where + A: de::EnumAccess<'de>, + { + use serde::de::VariantAccess; + data.variant::()?.1.newtype_variant() + } +} + +impl<'de, const S: usize> de::Deserialize<'de> for ExtractLinks { + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + let links = deserializer.deserialize_any(Self { links: Vec::new() })?; + Ok(Self { links }) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/mod.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/mod.rs new file mode 100644 index 000000000000..f1080147c905 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/mod.rs @@ -0,0 +1,164 @@ +//! Serde (de)serialization for [`crate::ipld::Ipld`]. +//! +//! This implementation enables Serde to serialize to/deserialize from [`crate::ipld::Ipld`] +//! values. The `Ipld` enum is similar to the `Value` enum in `serde_json` or `serde_cbor`. +mod de; +mod extract_links; +mod ser; + +use alloc::string::{String, ToString}; +use core::fmt; + +pub use de::from_ipld; +pub use extract_links::ExtractLinks; +pub use ser::{to_ipld, Serializer}; + +/// Error during Serde operations. 
+#[derive(Clone, Debug)] +pub struct SerdeError(String); + +impl fmt::Display for SerdeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "serde error: {}", self.0) + } +} + +impl serde::de::Error for SerdeError { + fn custom(message: T) -> Self { + Self(message.to_string()) + } +} + +impl serde::ser::Error for SerdeError { + fn custom(message: T) -> Self { + Self(message.to_string()) + } +} + +impl serde::ser::StdError for SerdeError {} + +#[cfg(test)] +mod tests { + use alloc::{collections::BTreeMap, string::String, vec, vec::Vec}; + use core::fmt; + + use cid::serde::CID_SERDE_PRIVATE_IDENTIFIER; + use cid::Cid; + use serde::{de::DeserializeOwned, Serialize}; + use serde_derive::Deserialize; + use serde_test::{assert_tokens, Token}; + + use crate::ipld::Ipld; + use crate::serde::{from_ipld, to_ipld}; + + /// Utility for testing (de)serialization of [`Ipld`]. + /// + /// Checks if `data` and `ipld` match if they are encoded into each other. + fn assert_roundtrip(data: &T, ipld: &Ipld) + where + T: Serialize + DeserializeOwned + PartialEq + fmt::Debug, + { + let encoded: Ipld = to_ipld(data).unwrap(); + assert_eq!(&encoded, ipld); + let decoded: T = from_ipld(ipld.clone()).unwrap(); + assert_eq!(&decoded, data); + } + + #[derive(Debug, Deserialize, PartialEq, Serialize)] + struct Person { + name: String, + age: u8, + hobbies: Vec, + is_cool: bool, + link: Cid, + } + + impl Default for Person { + fn default() -> Self { + Self { + name: "Hello World!".into(), + age: 52, + hobbies: vec!["geography".into(), "programming".into()], + is_cool: true, + link: Cid::try_from("bafyreibvjvcv745gig4mvqs4hctx4zfkono4rjejm2ta6gtyzkqxfjeily") + .unwrap(), + } + } + } + + #[test] + fn test_tokens() { + let person = Person::default(); + + assert_tokens( + &person, + &[ + Token::Struct { + name: "Person", + len: 5, + }, + Token::Str("name"), + Token::Str("Hello World!"), + Token::Str("age"), + Token::U8(52), + Token::Str("hobbies"), + Token::Seq { len: Some(2) }, + Token::Str("geography"), + Token::Str("programming"), + Token::SeqEnd, + Token::Str("is_cool"), + Token::Bool(true), + Token::Str("link"), + Token::NewtypeStruct { + name: CID_SERDE_PRIVATE_IDENTIFIER, + }, + Token::Bytes(&[ + 0x01, 0x71, 0x12, 0x20, 0x35, 0x4d, 0x45, 0x5f, 0xf3, 0xa6, 0x41, 0xb8, 0xca, + 0xc2, 0x5c, 0x38, 0xa7, 0x7e, 0x64, 0xaa, 0x73, 0x5d, 0xc8, 0xa4, 0x89, 0x66, + 0xa6, 0xf, 0x1a, 0x78, 0xca, 0xa1, 0x72, 0xa4, 0x88, 0x5e, + ]), + Token::StructEnd, + ], + ); + } + + /// Test if converting to a struct from [`crate::ipld::Ipld`] and back works. + #[test] + fn test_ipld() { + let person = Person::default(); + + let expected_ipld = Ipld::Map({ + BTreeMap::from([ + ("name".into(), Ipld::String("Hello World!".into())), + ("age".into(), Ipld::Integer(52)), + ( + "hobbies".into(), + Ipld::List(vec![ + Ipld::String("geography".into()), + Ipld::String("programming".into()), + ]), + ), + ("is_cool".into(), Ipld::Bool(true)), + ("link".into(), Ipld::Link(person.link)), + ]) + }); + + assert_roundtrip(&person, &expected_ipld); + } + + /// Test that deserializing arbitrary bytes are not accidentally recognized as CID. + #[test] + fn test_bytes_not_cid() { + let cid = + Cid::try_from("bafyreibvjvcv745gig4mvqs4hctx4zfkono4rjejm2ta6gtyzkqxfjeily").unwrap(); + + let bytes_not_cid = Ipld::Bytes(cid.to_bytes()); + let not_a_cid: Result = from_ipld(bytes_not_cid); + assert!(not_a_cid.is_err()); + + // Make sure that a Ipld::Link deserializes correctly though. 
+ let link = Ipld::Link(cid); + let a_cid: Cid = from_ipld(link).unwrap(); + assert_eq!(a_cid, cid); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/ser.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/ser.rs new file mode 100644 index 000000000000..19998f254068 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/src/serde/ser.rs @@ -0,0 +1,486 @@ +// Parts of this code is based on +// https://github.com/serde-rs/json/blob/95f67a09399d546d9ecadeb747a845a77ff309b2/src/value/ser.rs +use alloc::{ + borrow::ToOwned, + collections::BTreeMap, + format, + string::{String, ToString}, + vec::Vec, +}; +use core::convert::TryFrom; + +use cid::serde::CID_SERDE_PRIVATE_IDENTIFIER; +use cid::Cid; +use serde::ser; + +use crate::{ipld::Ipld, serde::SerdeError}; + +/// Serialize into instances of [`crate::ipld::Ipld`]. +/// +/// All Rust types can be serialized to [`crate::ipld::Ipld`], here is a list of how they are +/// converted: +/// +/// - bool -> `Ipld::Bool` +/// - i8, i16, i32, i64, i128, isize, u8, u16, u32, u64, usize -> `Ipld::Integer` +/// - f32, f64 -> `Ipld::Float` +/// - char, String -> `Ipld::String` +/// - slices -> `Ipld::List` +/// - struct +/// - struct -> `Ipld::Map` +/// - newtype struct -> the value the struct wraps +/// - tuple struct -> `Ipld::List` +/// - unit struct -> cannot be serialized, it errors +/// - enum: +/// - unit variant -> `Ipld::String` of the variant name +/// - newtype variant -> single element `Ipld::Map`, key: variant name, value: the one the +/// newtype wraps +/// - tuple variant -> single element `Ipld::Map`, key: variant name, value: `Ipld::List` +/// - struct variant -> single element `Ipld::Map`, key: variant name, value: `Ipld::Map` +/// - unit (`()`) -> cannot be serialized, it errors +/// +/// There are also common compound types that are supported: +/// +/// - [`std::option::Option`] -> eithe `Ipld::Null` or the value +/// - [`serde_bytes::ByteBuf`] -> `Ipld::Bytes` +/// - lists (like e.g. [`std::vec::Vec`]) -> `Ipld::List` +/// - maps (like e.g. [`std::collections::BTreeMap`]) -> `Ipld::Map` +/// - [`cid::Cid`] -> `Ipld::Link` +/// +/// +/// # Example +/// +/// ``` +/// use serde_derive::Serialize; +/// use ipld_core::ipld::Ipld; +/// use ipld_core::serde::to_ipld; +/// +/// #[derive(Serialize)] +/// struct Person { +/// name: String, +/// age: u8, +/// hobbies: Vec, +/// is_cool: bool, +/// } +/// +/// let person = Person { +/// name: "Hello World!".into(), +/// age: 52, +/// hobbies: vec!["geography".into(), "programming".into()], +/// is_cool: true, +/// }; +/// +/// let ipld = to_ipld(person); +/// assert!(matches!(ipld, Ok(Ipld::Map(_)))); +/// ``` +pub fn to_ipld(value: T) -> Result +where + T: ser::Serialize, +{ + value.serialize(Serializer) +} + +impl ser::Serialize for Ipld { + fn serialize(&self, serializer: S) -> Result + where + S: ser::Serializer, + { + match &self { + Self::Null => serializer.serialize_none(), + Self::Bool(value) => serializer.serialize_bool(*value), + Self::Integer(value) => serializer.serialize_i128(*value), + Self::Float(value) => serializer.serialize_f64(*value), + Self::String(value) => serializer.serialize_str(value), + Self::Bytes(value) => serializer.serialize_bytes(value), + Self::List(value) => serializer.collect_seq(value), + Self::Map(value) => serializer.collect_map(value), + Self::Link(value) => value.serialize(serializer), + } + } +} + +/// The IPLD serializer. 
+pub struct Serializer; + +impl serde::Serializer for Serializer { + type Ok = Ipld; + type Error = SerdeError; + + type SerializeSeq = SerializeVec; + type SerializeTuple = SerializeVec; + type SerializeTupleStruct = SerializeVec; + type SerializeTupleVariant = SerializeTupleVariant; + type SerializeMap = SerializeMap; + type SerializeStruct = SerializeMap; + type SerializeStructVariant = SerializeStructVariant; + + #[inline] + fn serialize_bool(self, value: bool) -> Result { + Ok(Self::Ok::Bool(value)) + } + + #[inline] + fn serialize_i8(self, value: i8) -> Result { + self.serialize_i64(i64::from(value)) + } + + #[inline] + fn serialize_i16(self, value: i16) -> Result { + self.serialize_i64(i64::from(value)) + } + + #[inline] + fn serialize_i32(self, value: i32) -> Result { + self.serialize_i64(i64::from(value)) + } + + #[inline] + fn serialize_i64(self, value: i64) -> Result { + self.serialize_i128(i128::from(value)) + } + + fn serialize_i128(self, value: i128) -> Result { + Ok(Self::Ok::Integer(value)) + } + + #[inline] + fn serialize_u8(self, value: u8) -> Result { + self.serialize_i128(value.into()) + } + + #[inline] + fn serialize_u16(self, value: u16) -> Result { + self.serialize_i128(value.into()) + } + + #[inline] + fn serialize_u32(self, value: u32) -> Result { + self.serialize_i128(value.into()) + } + + #[inline] + fn serialize_u64(self, value: u64) -> Result { + self.serialize_i128(value.into()) + } + + #[inline] + fn serialize_f32(self, value: f32) -> Result { + self.serialize_f64(f64::from(value)) + } + + #[inline] + fn serialize_f64(self, value: f64) -> Result { + Ok(Self::Ok::Float(value)) + } + + #[inline] + fn serialize_char(self, value: char) -> Result { + self.serialize_str(&value.to_string()) + } + + #[inline] + fn serialize_str(self, value: &str) -> Result { + Ok(Self::Ok::String(value.to_owned())) + } + + fn serialize_bytes(self, value: &[u8]) -> Result { + Ok(Self::Ok::Bytes(value.to_vec())) + } + + #[inline] + fn serialize_unit(self) -> Result { + Err(ser::Error::custom("Unit is not supported")) + } + + #[inline] + fn serialize_unit_struct(self, _name: &'static str) -> Result { + Err(ser::Error::custom("Unit structs are not supported")) + } + + #[inline] + fn serialize_unit_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + ) -> Result { + self.serialize_str(variant) + } + + #[inline] + fn serialize_newtype_struct( + self, + name: &'static str, + value: &T, + ) -> Result + where + T: ser::Serialize, + { + let ipld = value.serialize(self); + if name == CID_SERDE_PRIVATE_IDENTIFIER { + if let Ok(Ipld::Bytes(bytes)) = ipld { + let cid = Cid::try_from(bytes) + .map_err(|err| ser::Error::custom(format!("Invalid CID: {}", err)))?; + return Ok(Self::Ok::Link(cid)); + } + } + ipld + } + + fn serialize_newtype_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + value: &T, + ) -> Result + where + T: ser::Serialize, + { + let values = BTreeMap::from([(variant.to_owned(), value.serialize(self)?)]); + Ok(Self::Ok::Map(values)) + } + + #[inline] + fn serialize_none(self) -> Result { + Ok(Self::Ok::Null) + } + + #[inline] + fn serialize_some(self, value: &T) -> Result + where + T: ser::Serialize, + { + value.serialize(self) + } + + fn serialize_seq(self, len: Option) -> Result { + Ok(SerializeVec { + vec: Vec::with_capacity(len.unwrap_or(0)), + }) + } + + fn serialize_tuple(self, len: usize) -> Result { + self.serialize_seq(Some(len)) + } + + fn serialize_tuple_struct( + self, + _name: &'static str, + 
len: usize, + ) -> Result { + self.serialize_tuple(len) + } + + fn serialize_tuple_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + Ok(SerializeTupleVariant { + name: String::from(variant), + vec: Vec::with_capacity(len), + }) + } + + fn serialize_map(self, _len: Option) -> Result { + Ok(SerializeMap { + map: BTreeMap::new(), + next_key: None, + }) + } + + fn serialize_struct( + self, + _name: &'static str, + len: usize, + ) -> Result { + self.serialize_map(Some(len)) + } + + fn serialize_struct_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + _len: usize, + ) -> Result { + Ok(SerializeStructVariant { + name: String::from(variant), + map: BTreeMap::new(), + }) + } + + #[inline] + fn is_human_readable(&self) -> bool { + false + } +} + +pub struct SerializeVec { + vec: Vec, +} + +pub struct SerializeTupleVariant { + name: String, + vec: Vec, +} + +pub struct SerializeMap { + map: BTreeMap, + next_key: Option, +} + +pub struct SerializeStructVariant { + name: String, + map: BTreeMap, +} + +impl ser::SerializeSeq for SerializeVec { + type Ok = Ipld; + type Error = SerdeError; + + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ser::Serialize, + { + self.vec.push(value.serialize(Serializer)?); + Ok(()) + } + + fn end(self) -> Result { + Ok(Self::Ok::List(self.vec)) + } +} + +impl ser::SerializeTuple for SerializeVec { + type Ok = Ipld; + type Error = SerdeError; + + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ser::Serialize, + { + ser::SerializeSeq::serialize_element(self, value) + } + + fn end(self) -> Result { + ser::SerializeSeq::end(self) + } +} + +impl ser::SerializeTupleStruct for SerializeVec { + type Ok = Ipld; + type Error = SerdeError; + + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ser::Serialize, + { + ser::SerializeSeq::serialize_element(self, value) + } + + fn end(self) -> Result { + ser::SerializeSeq::end(self) + } +} + +impl ser::SerializeTupleVariant for SerializeTupleVariant { + type Ok = Ipld; + type Error = SerdeError; + + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ser::Serialize, + { + self.vec.push(value.serialize(Serializer)?); + Ok(()) + } + + fn end(self) -> Result { + let map = BTreeMap::from([(self.name, Self::Ok::List(self.vec))]); + Ok(Self::Ok::Map(map)) + } +} + +impl ser::SerializeMap for SerializeMap { + type Ok = Ipld; + type Error = SerdeError; + + fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error> + where + T: ser::Serialize, + { + match key.serialize(Serializer)? { + Ipld::String(string_key) => { + self.next_key = Some(string_key); + Ok(()) + } + _ => Err(ser::Error::custom("Map keys must be strings".to_string())), + } + } + + fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error> + where + T: ser::Serialize, + { + let key = self.next_key.take(); + // Panic because this indicates a bug in the program rather than an + // expected failure. 
+ let key = key.expect("serialize_value called before serialize_key"); + self.map.insert(key, value.serialize(Serializer)?); + Ok(()) + } + + fn end(self) -> Result { + Ok(Self::Ok::Map(self.map)) + } +} + +impl ser::SerializeStruct for SerializeMap { + type Ok = Ipld; + type Error = SerdeError; + + fn serialize_field( + &mut self, + key: &'static str, + value: &T, + ) -> Result<(), Self::Error> + where + T: ser::Serialize, + { + serde::ser::SerializeMap::serialize_key(self, key)?; + serde::ser::SerializeMap::serialize_value(self, value) + } + + fn end(self) -> Result { + serde::ser::SerializeMap::end(self) + } +} + +impl ser::SerializeStructVariant for SerializeStructVariant { + type Ok = Ipld; + type Error = SerdeError; + + fn serialize_field( + &mut self, + key: &'static str, + value: &T, + ) -> Result<(), Self::Error> + where + T: ser::Serialize, + { + self.map + .insert(key.to_string(), value.serialize(Serializer)?); + Ok(()) + } + + fn end(self) -> Result { + let mut object = BTreeMap::new(); + + object.insert(self.name, Self::Ok::Map(self.map)); + + Ok(Self::Ok::Map(object)) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/macros.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/macros.rs new file mode 100644 index 000000000000..a330910bce73 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/macros.rs @@ -0,0 +1,21 @@ +use ipld_core::{cid::Cid, ipld, ipld::Ipld}; + +#[test] +fn test_macro() { + let _: Ipld = ipld!(null); + let _: Ipld = ipld!(true); + let _: Ipld = ipld!(false); + let _: Ipld = ipld!(1); + let _: Ipld = ipld!(1.0); + let a: Ipld = ipld!("string"); + let _: Ipld = ipld!([]); + let _: Ipld = ipld!([1, 2, 3]); + let _: Ipld = ipld!({}); + let _: Ipld = ipld!({ + "bye": null, + "numbers": [1, 2, 3], + "a": a, + }); + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let _: Ipld = ipld!(cid); +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_deserialize.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_deserialize.rs new file mode 100644 index 000000000000..c1044558e877 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_deserialize.rs @@ -0,0 +1,155 @@ +#![cfg(feature = "serde")] + +extern crate alloc; + +use alloc::collections::BTreeMap; +use core::convert::TryFrom; + +use serde_test::{assert_de_tokens, Token}; + +use ipld_core::cid::{serde::CID_SERDE_PRIVATE_IDENTIFIER, Cid}; +use ipld_core::ipld::Ipld; + +#[test] +fn ipld_deserialize_null() { + let ipld = Ipld::Null; + assert_de_tokens(&ipld, &[Token::None]); +} + +#[test] +fn ipld_deserialize_null_as_unit() { + let ipld = Ipld::Null; + assert_de_tokens(&ipld, &[Token::Unit]); +} + +#[test] +fn ipld_deserialize_null_as_unit_struct() { + let ipld = Ipld::Null; + assert_de_tokens(&ipld, &[Token::UnitStruct { name: "foo" }]); +} + +#[test] +fn ipld_deserialize_bool() { + let bool = true; + let ipld = Ipld::Bool(bool); + assert_de_tokens(&ipld, &[Token::Bool(bool)]); +} + +#[test] +fn ipld_deserialize_integer_u() { + let integer = 32u8; + let ipld = Ipld::Integer(integer.into()); + assert_de_tokens(&ipld, &[Token::U8(integer)]); + assert_de_tokens(&ipld, &[Token::U16(integer.into())]); + assert_de_tokens(&ipld, &[Token::U32(integer.into())]); + assert_de_tokens(&ipld, &[Token::U64(integer.into())]); +} + +#[test] +fn ipld_deserialize_integer_i() { + let integer = -32i8; + let ipld = 
Ipld::Integer(integer.into()); + assert_de_tokens(&ipld, &[Token::I8(integer)]); + assert_de_tokens(&ipld, &[Token::I16(integer.into())]); + assert_de_tokens(&ipld, &[Token::I32(integer.into())]); + assert_de_tokens(&ipld, &[Token::I64(integer.into())]); +} + +#[test] +fn ipld_deserialize_float() { + let float = 32.41f32; + let ipld = Ipld::Float(float.into()); + assert_de_tokens(&ipld, &[Token::F32(float)]); + assert_de_tokens(&ipld, &[Token::F64(float.into())]); +} + +#[test] +fn ipld_deserialize_string() { + let string = "hello"; + let ipld = Ipld::String(string.into()); + assert_de_tokens(&ipld, &[Token::Str(string)]); + assert_de_tokens(&ipld, &[Token::BorrowedStr(string)]); + assert_de_tokens(&ipld, &[Token::String(string)]); +} + +#[test] +fn ipld_deserialize_string_char() { + let char = 'h'; + let ipld = Ipld::String(char.into()); + assert_de_tokens(&ipld, &[Token::Char(char)]); +} + +#[test] +fn ipld_deserialize_bytes() { + let bytes = vec![0x68, 0x65, 0x6c, 0x6c, 0x6f]; + let ipld = Ipld::Bytes(bytes); + assert_de_tokens(&ipld, &[Token::Bytes(b"hello")]); + assert_de_tokens(&ipld, &[Token::BorrowedBytes(b"hello")]); + assert_de_tokens(&ipld, &[Token::ByteBuf(b"hello")]); +} + +#[test] +fn ipld_deserialize_list() { + let ipld = Ipld::List(vec![Ipld::Bool(false), Ipld::Float(22.7)]); + assert_de_tokens( + &ipld, + &[ + Token::Seq { len: Some(2) }, + Token::Bool(false), + Token::F64(22.7), + Token::SeqEnd, + ], + ); +} + +#[test] +fn ipld_deserialize_map() { + let ipld = Ipld::Map(BTreeMap::from([ + ("hello".to_string(), Ipld::Bool(true)), + ("world!".to_string(), Ipld::Bool(false)), + ])); + assert_de_tokens( + &ipld, + &[ + Token::Map { len: Some(2) }, + Token::Str("hello"), + Token::Bool(true), + Token::Str("world!"), + Token::Bool(false), + Token::MapEnd, + ], + ); +} + +#[test] +fn ipld_deserialize_link() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + assert_de_tokens( + &ipld, + &[ + Token::NewtypeStruct { + name: CID_SERDE_PRIVATE_IDENTIFIER, + }, + Token::Bytes(&[ + 1, 85, 18, 32, 159, 228, 204, 198, 222, 22, 114, 79, 58, 48, 199, 232, 242, 84, + 243, 198, 71, 25, 134, 172, 177, 248, 216, 207, 142, 150, 206, 42, 215, 219, 231, + 251, + ]), + ], + ); +} + +#[test] +#[should_panic] +fn ipld_deserialize_link_not_as_bytes() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + assert_de_tokens( + &ipld, + &[Token::Bytes(&[ + 1, 85, 18, 32, 159, 228, 204, 198, 222, 22, 114, 79, 58, 48, 199, 232, 242, 84, 243, + 198, 71, 25, 134, 172, 177, 248, 216, 207, 142, 150, 206, 42, 215, 219, 231, 251, + ])], + ); +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_deserializer.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_deserializer.rs new file mode 100644 index 000000000000..5b04b90ced84 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_deserializer.rs @@ -0,0 +1,709 @@ +#![cfg(feature = "serde")] + +extern crate alloc; + +use alloc::collections::BTreeMap; +use core::convert::TryFrom; + +use serde::Deserialize; +use serde_bytes::ByteBuf; +use serde_json::json; + +use ipld_core::cid::Cid; +use ipld_core::ipld::Ipld; + +/// This function is to test that all IPLD kinds except the given one errors, when trying to +/// deserialize to the given Rust type. 
+fn error_except<'de, T>(_input: T, except: &Ipld) +where + T: Deserialize<'de> + core::fmt::Debug, +{ + if !matches!(except, Ipld::Null) { + assert!(T::deserialize(Ipld::Null).is_err()); + } + if !matches!(except, Ipld::Bool(_)) { + assert!(T::deserialize(Ipld::Bool(true)).is_err()); + } + if !matches!(except, Ipld::Integer(_)) { + assert!(T::deserialize(Ipld::Integer(22)).is_err()); + } + if !matches!(except, Ipld::Float(_)) { + assert!(T::deserialize(Ipld::Float(5.3)).is_err()); + } + if !matches!(except, Ipld::String(_)) { + assert!(T::deserialize(Ipld::String("hello".into())).is_err()); + } + if !matches!(except, Ipld::Bytes(_)) { + assert!(T::deserialize(Ipld::Bytes(vec![0x68, 0x65, 0x6c, 0x6c, 0x6f])).is_err()); + } + if !matches!(except, Ipld::List(_)) { + assert!(T::deserialize(Ipld::List(vec![Ipld::Integer(22), Ipld::Bool(false)])).is_err()); + } + if !matches!(except, Ipld::Map(_)) { + assert!(T::deserialize(Ipld::Map(BTreeMap::from([ + ("hello".into(), Ipld::Null), + ("world!".into(), Ipld::Float(7.4)) + ]))) + .is_err()); + } + if !matches!(except, Ipld::Link(_)) { + assert!(T::deserialize(Ipld::Link( + Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap() + )) + .is_err()); + } +} + +#[test] +#[allow(clippy::unit_cmp)] +#[allow(clippy::let_unit_value)] +fn ipld_deserializer_unit() { + let unit = (); + let ipld = Ipld::Null; + error_except(unit, &ipld); + + let deserialized = <()>::deserialize(ipld).unwrap(); + assert_eq!(deserialized, ()); +} + +#[test] +fn ipld_deserializer_unit_struct() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + struct UnitStruct; + + let ipld = Ipld::Null; + let deserialized = UnitStruct::deserialize(ipld); + assert!(deserialized.is_err()); +} + +#[test] +fn ipld_deserializer_bool() { + let bool = false; + let ipld = Ipld::Bool(bool); + error_except(bool, &ipld); + + let deserialized = bool::deserialize(ipld).unwrap(); + assert_eq!(deserialized, bool); +} + +#[test] +fn ipld_deserializer_u8() { + let integer = 34u8; + let ipld = Ipld::Integer(integer.into()); + error_except(integer, &ipld); + + let deserialized = u8::deserialize(ipld).unwrap(); + assert_eq!( + deserialized, integer, + "Correctly deserialize Ipld::Integer to u8." + ); + + let too_large = u8::deserialize(Ipld::Integer((u8::MAX as i128) + 10)); + assert!(too_large.is_err(), "Number must be within range."); + let too_small = u8::deserialize(Ipld::Integer((u8::MIN as i128) - 10)); + assert!(too_small.is_err(), "Number must be within range."); +} + +#[test] +fn ipld_deserializer_u16() { + let integer = 345u16; + let ipld = Ipld::Integer(integer.into()); + error_except(integer, &ipld); + + let deserialized = u16::deserialize(ipld).unwrap(); + assert_eq!( + deserialized, integer, + "Correctly deserialize Ipld::Integer to u16." + ); + + let too_large = u16::deserialize(Ipld::Integer((u16::MAX as i128) + 10)); + assert!(too_large.is_err(), "Number must be within range."); + let too_small = u16::deserialize(Ipld::Integer((u16::MIN as i128) - 10)); + assert!(too_small.is_err(), "Number must be within range."); +} + +#[test] +fn ipld_deserializer_u32() { + let integer = 345678u32; + let ipld = Ipld::Integer(integer.into()); + error_except(integer, &ipld); + + let deserialized = u32::deserialize(ipld).unwrap(); + assert_eq!( + deserialized, integer, + "Correctly deserialize Ipld::Integer to u32." 
+ ); + + let too_large = u32::deserialize(Ipld::Integer((u32::MAX as i128) + 10)); + assert!(too_large.is_err(), "Number must be within range."); + let too_small = u32::deserialize(Ipld::Integer((u32::MIN as i128) - 10)); + assert!(too_small.is_err(), "Number must be within range."); +} + +#[test] +fn ipld_deserializer_u64() { + let integer = 34567890123u64; + let ipld = Ipld::Integer(integer.into()); + error_except(integer, &ipld); + + let deserialized = u64::deserialize(ipld).unwrap(); + assert_eq!( + deserialized, integer, + "Correctly deserialize Ipld::Integer to u64." + ); + + let too_large = u64::deserialize(Ipld::Integer((u64::MAX as i128) + 10)); + assert!(too_large.is_err(), "Number must be within range."); + let too_small = u64::deserialize(Ipld::Integer((u64::MIN as i128) - 10)); + assert!(too_small.is_err(), "Number must be within range."); +} + +#[test] +fn ipld_deserializer_i8() { + let integer = -23i8; + let ipld = Ipld::Integer(integer.into()); + error_except(integer, &ipld); + + let deserialized = i8::deserialize(ipld).unwrap(); + assert_eq!( + deserialized, integer, + "Correctly deserialize Ipld::Integer to i8." + ); + + let too_large = i8::deserialize(Ipld::Integer((i8::MAX as i128) + 10)); + assert!(too_large.is_err(), "Number must be within range."); + let too_small = i8::deserialize(Ipld::Integer((i8::MIN as i128) - 10)); + assert!(too_small.is_err(), "Number must be within range."); +} + +#[test] +fn ipld_deserializer_i16() { + let integer = 2345i16; + let ipld = Ipld::Integer(integer.into()); + error_except(integer, &ipld); + + let deserialized = i16::deserialize(ipld).unwrap(); + assert_eq!( + deserialized, integer, + "Correctly deserialize Ipld::Integer to i16." + ); + + let too_large = i16::deserialize(Ipld::Integer((i16::MAX as i128) + 10)); + assert!(too_large.is_err(), "Number must be within range."); + let too_small = i16::deserialize(Ipld::Integer((i16::MIN as i128) - 10)); + assert!(too_small.is_err(), "Number must be within range."); +} + +#[test] +fn ipld_deserializer_i32() { + let integer = 234567i32; + let ipld = Ipld::Integer(integer.into()); + error_except(integer, &ipld); + + let deserialized = i32::deserialize(ipld).unwrap(); + assert_eq!( + deserialized, integer, + "Correctly deserialize Ipld::Integer to i32." + ); + + let too_large = i32::deserialize(Ipld::Integer((i32::MAX as i128) + 10)); + assert!(too_large.is_err(), "Number must be within range."); + let too_small = i32::deserialize(Ipld::Integer((i32::MIN as i128) - 10)); + assert!(too_small.is_err(), "Number must be within range."); +} + +#[test] +fn ipld_deserializer_i64() { + let integer = 2345678901i64; + let ipld = Ipld::Integer(integer.into()); + error_except(integer, &ipld); + + let deserialized = i64::deserialize(ipld).unwrap(); + assert_eq!( + deserialized, integer, + "Correctly deserialize Ipld::Integer to i64." + ); + + let too_large = i64::deserialize(Ipld::Integer((i64::MAX as i128) + 10)); + assert!(too_large.is_err(), "Number must be within range."); + let too_small = i64::deserialize(Ipld::Integer((i64::MIN as i128) - 10)); + assert!(too_small.is_err(), "Number must be within range."); +} + +#[test] +fn ipld_deserializer_f32() { + let float = 7.3f32; + let ipld = Ipld::Float(float.into()); + error_except(float, &ipld); + + let deserialized = f32::deserialize(ipld).unwrap(); + assert_eq!(deserialized, float); +} + +#[test] +fn ipld_deserializer_f32_with_loss() { + // Make sure that there is an error if the value can only be converted with loss. 
7.3f32 is + // different from 7.3f64. + let ipld = Ipld::Float(7.3f64); + let error = f32::deserialize(ipld); + assert!(error.is_err()); +} + +#[test] +fn ipld_deserializer_f32_nan() { + let ipld = Ipld::Float(f32::NAN.into()); + let error = f32::deserialize(ipld); + assert!(error.is_err()); +} + +#[test] +fn ipld_deserializer_f32_infinity() { + let ipld = Ipld::Float(f32::INFINITY.into()); + let error = f32::deserialize(ipld); + assert!(error.is_err()); +} + +#[test] +fn ipld_deserializer_f64() { + let float = 427.8f64; + let ipld = Ipld::Float(float); + error_except(float, &ipld); + + let deserialized = f64::deserialize(ipld).unwrap(); + assert_eq!(deserialized, float); +} + +#[test] +fn ipld_deserializer_f64_nan() { + let ipld = Ipld::Float(f64::NAN); + let error = f64::deserialize(ipld); + assert!(error.is_err()); +} + +#[test] +fn ipld_deserializer_f64_infinity() { + let ipld = Ipld::Float(f64::INFINITY); + let error = f64::deserialize(ipld); + assert!(error.is_err()); +} + +#[test] +fn ipld_deserializer_char() { + let char = 'x'; + let ipld = Ipld::String(char.to_string()); + error_except(char, &ipld); + + let deserialized = char::deserialize(ipld).unwrap(); + assert_eq!(deserialized, char); +} + +#[test] +fn ipld_deserializer_str() { + let str: &str = "hello"; + let ipld = Ipld::String(str.to_string()); + error_except(str, &ipld); + + // TODO vmx 2022-02-09: Doesn't work yet. If we would have a zero-copy version, it should + //let deserialized = <&str>::deserialize(ipld).unwrap(); + //assert_eq!(deserialized, string); +} + +#[test] +fn ipld_deserializer_string() { + let string = "hello".to_string(); + let ipld = Ipld::String(string.clone()); + error_except(string.clone(), &ipld); + + let deserialized = String::deserialize(ipld).unwrap(); + assert_eq!(deserialized, string); +} + +#[test] +fn ipld_deserializer_bytes() { + let bytes = vec![0x68, 0x65, 0x6c, 0x6c, 0x6f]; + let ipld = Ipld::Bytes(bytes.clone()); + error_except(&bytes[..], &ipld); + + // TODO vmx 2022-02-09: Doesn't work yet. 
If we would have a zero-copy version, it should + //let deserialized = <&[u8]>::deserialize(ipld).unwrap(); + //assert_eq!(deserialized, bytes); +} + +#[test] +fn ipld_deserializer_byte_buf() { + let bytes = vec![0x68, 0x65, 0x6c, 0x6c, 0x6f]; + let ipld = Ipld::Bytes(bytes.clone()); + error_except(ByteBuf::from(bytes.clone()), &ipld); + + let deserialized = ByteBuf::deserialize(ipld).unwrap(); + assert_eq!(deserialized, bytes); +} + +#[test] +fn ipld_deserializer_list() { + let list = vec![0x68, 0x65, 0x6c, 0x6c, 0x6f]; + let ipld = Ipld::List(vec![ + Ipld::Integer(0x68), + Ipld::Integer(0x65), + Ipld::Integer(0x6c), + Ipld::Integer(0x6c), + Ipld::Integer(0x6f), + ]); + error_except(list.clone(), &ipld); + + let deserialized = Vec::::deserialize(ipld).unwrap(); + assert_eq!(deserialized, list); +} + +#[test] +fn ipld_deserializer_tuple() { + let tuple = (true, "hello".to_string()); + let ipld = Ipld::List(vec![Ipld::Bool(tuple.0), Ipld::String(tuple.1.clone())]); + error_except(tuple.clone(), &ipld); + + let deserialized = <(bool, String)>::deserialize(ipld).unwrap(); + assert_eq!(deserialized, tuple); +} + +#[test] +fn ipld_deserializer_tuple_errors() { + let tuple = (true, "hello".to_string()); + + let ipld_not_enough = Ipld::List(vec![Ipld::Bool(tuple.0)]); + error_except(tuple.clone(), &ipld_not_enough); + let error_not_enough = <(bool, String)>::deserialize(ipld_not_enough); + assert!(error_not_enough.is_err()); + + let ipld_too_many = Ipld::List(vec![ + Ipld::Bool(tuple.0), + Ipld::String(tuple.1.clone()), + Ipld::Null, + ]); + error_except(tuple.clone(), &ipld_too_many); + let error_too_many = <(bool, String)>::deserialize(ipld_too_many); + assert!(error_too_many.is_err()); + + let ipld_not_matching = Ipld::List(vec![Ipld::String(tuple.1.clone()), Ipld::Bool(tuple.0)]); + error_except(tuple, &ipld_not_matching); + let error_not_matching = <(bool, String)>::deserialize(ipld_not_matching); + assert!(error_not_matching.is_err()); +} + +#[test] +fn ipld_deserializer_tuple_struct() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + struct TupleStruct(u8, bool); + + let tuple_struct = TupleStruct(82, true); + let ipld = Ipld::List(vec![Ipld::Integer(82), Ipld::Bool(true)]); + error_except(tuple_struct.clone(), &ipld); + + let deserialized = TupleStruct::deserialize(ipld).unwrap(); + assert_eq!(deserialized, tuple_struct); +} + +#[test] +fn ipld_deserializer_tuple_struct_errors() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + struct TupleStruct(u8, bool); + + let tuple_struct = TupleStruct(82, true); + + let ipld_not_enough = Ipld::List(vec![Ipld::Integer(tuple_struct.0.into())]); + error_except(tuple_struct.clone(), &ipld_not_enough); + let error_not_enough = TupleStruct::deserialize(ipld_not_enough); + assert!(error_not_enough.is_err()); + + let ipld_too_many = Ipld::List(vec![ + Ipld::Integer(tuple_struct.0.into()), + Ipld::Bool(tuple_struct.1), + Ipld::Null, + ]); + error_except(tuple_struct.clone(), &ipld_too_many); + let error_too_many = TupleStruct::deserialize(ipld_too_many); + assert!(error_too_many.is_err()); + + let ipld_not_matching = Ipld::List(vec![ + Ipld::Bool(tuple_struct.1), + Ipld::Integer(tuple_struct.0.into()), + ]); + error_except(tuple_struct, &ipld_not_matching); + let error_not_matching = TupleStruct::deserialize(ipld_not_matching); + assert!(error_not_matching.is_err()); +} + +#[test] +fn ipld_deserializer_map() { + let map = BTreeMap::from([("hello".to_string(), true), ("world!".to_string(), false)]); + let ipld = Ipld::Map(BTreeMap::from([ + 
("hello".to_string(), Ipld::Bool(true)), + ("world!".to_string(), Ipld::Bool(false)), + ])); + error_except(map.clone(), &ipld); + + let deserialized = BTreeMap::deserialize(ipld).unwrap(); + assert_eq!(deserialized, map); +} + +/// A CID is deserialized through a newtype struct. +#[test] +fn ipld_deserializer_cid() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + error_except(cid, &ipld); + + let deserialized = Cid::deserialize(ipld).unwrap(); + assert_eq!(deserialized, cid); +} + +/// Make sure that a CID cannot be deserialized into bytes. +#[test] +fn ipld_deserializer_cid_not_bytes() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + error_except(cid, &ipld); + + let deserialized = ByteBuf::deserialize(ipld); + assert!(deserialized.is_err()); +} + +/// Make sure that a CID cannot be deserialized into bytes. +#[test] +fn ipld_deserializer_cid_not_bytes_newtype_struct() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + struct Wrapped(ByteBuf); + + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + error_except(cid, &ipld); + + let deserialized = Wrapped::deserialize(ipld); + assert!(deserialized.is_err()); +} + +/// Make sure that a CID cannot be deserialized into bytes. +#[test] +fn ipld_deserializer_cid_untagged() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + #[serde(untagged)] + enum MyOption { + Some(ByteBuf), + None, + } + + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + error_except(cid, &ipld); + + let deserialized = MyOption::deserialize(ipld); + assert!(deserialized.is_err()); +} + +#[test] +fn ipld_deserializer_newtype_struct() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + struct Wrapped(u8); + + let newtype_struct = Wrapped(5); + let ipld = Ipld::Integer(5); + error_except(newtype_struct.clone(), &ipld); + + let deserialized = Wrapped::deserialize(ipld).unwrap(); + assert_eq!(deserialized, newtype_struct); +} + +/// An additional test, just to make sure that wrapped CIDs also work. +#[test] +fn ipld_deserializer_newtype_struct_cid() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + struct Wrapped(Cid); + + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let newtype_struct = Wrapped(cid); + let ipld = Ipld::Link(cid); + error_except(newtype_struct.clone(), &ipld); + + let deserialized = Wrapped::deserialize(ipld).unwrap(); + assert_eq!(deserialized, newtype_struct); +} + +#[test] +fn ipld_deserializer_option() { + let option_some: Option = Some(58u8); + let option_none: Option = None; + let ipld_some = Ipld::Integer(58); + let ipld_none = Ipld::Null; + + // This is similar to `error_except`, which cannot be used here, as we need to exclude + // `Ipld::Integer` *and* `Ipld::Null`. 
+ assert!(>::deserialize(Ipld::Bool(true)).is_err()); + assert!(>::deserialize(Ipld::Float(5.3)).is_err()); + assert!(>::deserialize(Ipld::String("hello".into())).is_err()); + assert!(>::deserialize(Ipld::Bytes(vec![0x01, 0x97])).is_err()); + assert!( + >::deserialize(Ipld::List(vec![Ipld::Integer(22), Ipld::Bool(false)])).is_err() + ); + assert!(>::deserialize(Ipld::Map(BTreeMap::from([ + ("hello".into(), Ipld::Null), + ("world!".into(), Ipld::Float(7.4)) + ]))) + .is_err()); + assert!(>::deserialize(Ipld::Link( + Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap() + )) + .is_err()); + + let deserialized_some = >::deserialize(ipld_some).unwrap(); + assert_eq!(deserialized_some, option_some); + let deserialized_none = >::deserialize(ipld_none).unwrap(); + assert_eq!(deserialized_none, option_none); +} + +#[test] +fn ipld_deserializer_enum() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + enum MyEnum { + One, + Two(u8), + Three { value: bool }, + } + + let enum_one = MyEnum::One; + let ipld_one = Ipld::String("One".into()); + error_except(enum_one.clone(), &ipld_one); + let deserialized_one = MyEnum::deserialize(ipld_one).unwrap(); + assert_eq!(deserialized_one, enum_one); + + let enum_two = MyEnum::Two(4); + let ipld_two = Ipld::Map(BTreeMap::from([("Two".into(), Ipld::Integer(4))])); + error_except(enum_two.clone(), &ipld_two); + let deserialized_two = MyEnum::deserialize(ipld_two).unwrap(); + assert_eq!(deserialized_two, enum_two); + + let enum_three = MyEnum::Three { value: true }; + let ipld_three = Ipld::Map(BTreeMap::from([( + "Three".into(), + Ipld::Map(BTreeMap::from([("value".into(), Ipld::Bool(true))])), + )])); + error_except(enum_three.clone(), &ipld_three); + let deserialized_three = MyEnum::deserialize(ipld_three).unwrap(); + assert_eq!(deserialized_three, enum_three); +} + +#[test] +fn ipld_deserializer_enum_tuple_variant_errors() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + enum MyEnum { + Two(u8, bool), + } + + let tuple_variant = MyEnum::Two(17, false); + + let ipld_not_enough = Ipld::Map(BTreeMap::from([( + "Two".into(), + Ipld::List(vec![Ipld::Integer(17)]), + )])); + error_except(tuple_variant.clone(), &ipld_not_enough); + let error_not_enough = MyEnum::deserialize(ipld_not_enough); + assert!(error_not_enough.is_err()); + + let ipld_too_many = Ipld::Map(BTreeMap::from([( + "Two".into(), + Ipld::List(vec![Ipld::Integer(17), Ipld::Bool(false), Ipld::Null]), + )])); + error_except(tuple_variant.clone(), &ipld_too_many); + let error_too_many = MyEnum::deserialize(ipld_too_many); + assert!(error_too_many.is_err()); + + let ipld_not_matching = Ipld::Map(BTreeMap::from([( + "Two".into(), + Ipld::List(vec![Ipld::Bool(false), Ipld::Integer(17)]), + )])); + error_except(tuple_variant, &ipld_not_matching); + let error_not_matching = MyEnum::deserialize(ipld_not_matching); + assert!(error_not_matching.is_err()); +} + +#[test] +fn ipld_deserializer_struct() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + struct MyStruct { + hello: u8, + world: bool, + } + + let my_struct = MyStruct { + hello: 91, + world: false, + }; + let ipld = Ipld::Map(BTreeMap::from([ + ("hello".into(), Ipld::Integer(my_struct.hello.into())), + ("world".into(), Ipld::Bool(my_struct.world)), + ])); + error_except(my_struct.clone(), &ipld); + + let deserialized = MyStruct::deserialize(ipld).unwrap(); + assert_eq!(deserialized, my_struct); +} + +#[test] +fn ipld_deserializer_struct_errors() { + #[derive(Clone, Debug, Deserialize, PartialEq)] + struct MyStruct 
{ + hello: u8, + world: bool, + } + + let my_struct = MyStruct { + hello: 91, + world: false, + }; + + let ipld_missing = Ipld::Map(BTreeMap::from([( + "hello".into(), + Ipld::Integer(my_struct.hello.into()), + )])); + error_except(my_struct.clone(), &ipld_missing); + let error_missing = MyStruct::deserialize(ipld_missing); + assert!(error_missing.is_err()); + + let ipld_wrong = Ipld::Map(BTreeMap::from([( + "wrong".into(), + Ipld::Integer(my_struct.hello.into()), + )])); + error_except(my_struct, &ipld_wrong); + let error_wrong = MyStruct::deserialize(ipld_wrong); + assert!(error_wrong.is_err()); +} + +/// This tests exercises the `deserialize_any` code path. +#[test] +fn ipld_deserializer_ipld() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + error_except(cid, &ipld); + + let deserialized = Ipld::deserialize(ipld.clone()).unwrap(); + assert_eq!(deserialized, ipld); +} + +/// This test shows that the values [`serde_json::Value`] supports, can be deserialized into Ipld +#[test] +fn ipld_deserializer_serde_json_value() { + let json_value = json!({ "hello": true, "world": "it is" }); + let ipld = Ipld::Map(BTreeMap::from([ + ("hello".into(), Ipld::Bool(true)), + ("world".into(), Ipld::String("it is".into())), + ])); + let deserialized = serde_json::Value::deserialize(ipld).unwrap(); + assert_eq!(deserialized, json_value); +} + +/// This test shows that CIDs cannot be deserialized into a [`serde_json::Value`]. +#[test] +fn ipld_deserializer_serde_json_value_cid_fails() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + let error = serde_json::Value::deserialize(ipld); + assert!(error.is_err()); +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_serialize.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_serialize.rs new file mode 100644 index 000000000000..c24f317297d7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_serialize.rs @@ -0,0 +1,122 @@ +#![cfg(feature = "serde")] + +extern crate alloc; + +use alloc::collections::BTreeMap; +use core::convert::TryFrom; + +use serde_test::{assert_ser_tokens, Token}; + +use ipld_core::cid::{serde::CID_SERDE_PRIVATE_IDENTIFIER, Cid}; +use ipld_core::ipld::Ipld; + +#[test] +fn ipld_serialize_null() { + let ipld = Ipld::Null; + assert_ser_tokens(&ipld, &[Token::None]); +} + +#[test] +fn ipld_serialize_bool() { + let bool = true; + let ipld = Ipld::Bool(bool); + assert_ser_tokens(&ipld, &[Token::Bool(bool)]); +} + +// NOTE vmx 2022-02-15: assert_ser_tokens doesn't support i128 +//#[test] +//fn ipld_serialize_integer() { +// let integer = 32u8; +// let ipld = Ipld::Integer(integer.into()); +//} + +#[test] +fn ipld_serialize_float() { + let float = 32.41f32; + let ipld = Ipld::Float(float.into()); + assert_ser_tokens(&ipld, &[Token::F64(float.into())]); +} + +#[test] +fn ipld_serialize_string() { + let string = "hello"; + let ipld = Ipld::String(string.into()); + assert_ser_tokens(&ipld, &[Token::Str(string)]); + assert_ser_tokens(&ipld, &[Token::BorrowedStr(string)]); + assert_ser_tokens(&ipld, &[Token::String(string)]); +} + +#[test] +fn ipld_serialize_bytes() { + let bytes = vec![0x68, 0x65, 0x6c, 0x6c, 0x6f]; + let ipld = Ipld::Bytes(bytes); + assert_ser_tokens(&ipld, &[Token::Bytes(b"hello")]); + assert_ser_tokens(&ipld, &[Token::BorrowedBytes(b"hello")]); + assert_ser_tokens(&ipld, 
&[Token::ByteBuf(b"hello")]); +} + +#[test] +fn ipld_serialize_list() { + let ipld = Ipld::List(vec![Ipld::Bool(false), Ipld::Float(22.7)]); + assert_ser_tokens( + &ipld, + &[ + Token::Seq { len: Some(2) }, + Token::Bool(false), + Token::F64(22.7), + Token::SeqEnd, + ], + ); +} + +#[test] +fn ipld_serialize_map() { + let ipld = Ipld::Map(BTreeMap::from([ + ("hello".to_string(), Ipld::Bool(true)), + ("world!".to_string(), Ipld::Bool(false)), + ])); + assert_ser_tokens( + &ipld, + &[ + Token::Map { len: Some(2) }, + Token::Str("hello"), + Token::Bool(true), + Token::Str("world!"), + Token::Bool(false), + Token::MapEnd, + ], + ); +} + +#[test] +fn ipld_serialize_link() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + assert_ser_tokens( + &ipld, + &[ + Token::NewtypeStruct { + name: CID_SERDE_PRIVATE_IDENTIFIER, + }, + Token::Bytes(&[ + 1, 85, 18, 32, 159, 228, 204, 198, 222, 22, 114, 79, 58, 48, 199, 232, 242, 84, + 243, 198, 71, 25, 134, 172, 177, 248, 216, 207, 142, 150, 206, 42, 215, 219, 231, + 251, + ]), + ], + ); +} + +#[test] +#[should_panic(expected = "expected Token::Bytes")] +fn ipld_serialize_link_not_as_bytes() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + assert_ser_tokens( + &ipld, + &[Token::Bytes(&[ + 1, 85, 18, 32, 159, 228, 204, 198, 222, 22, 114, 79, 58, 48, 199, 232, 242, 84, 243, + 198, 71, 25, 134, 172, 177, 248, 216, 207, 142, 150, 206, 42, 215, 219, 231, 251, + ])], + ); +} diff --git a/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_serializer.rs b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_serializer.rs new file mode 100644 index 000000000000..56952c47e12d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/ipld-core-0.4.1/tests/serde_serializer.rs @@ -0,0 +1,269 @@ +#![cfg(feature = "serde")] + +extern crate alloc; + +use alloc::collections::BTreeMap; +use core::convert::TryFrom; + +use serde::ser; +use serde_bytes::ByteBuf; +use serde_derive::Serialize; + +use ipld_core::cid::Cid; +use ipld_core::ipld::Ipld; +use ipld_core::serde::to_ipld; + +fn assert_serialized(input: T, ipld: Ipld) +where + T: ser::Serialize, +{ + let serialized = to_ipld(input).unwrap(); + assert_eq!(serialized, ipld); +} + +#[test] +#[allow(clippy::let_unit_value)] +fn ipld_serializer_unit() { + let unit = (); + let serialized = to_ipld(unit); + assert!(serialized.is_err()); +} + +#[test] +fn ipld_serializer_unit_struct() { + #[derive(Clone, Debug, Serialize, PartialEq)] + struct UnitStruct; + + let unit_struct = UnitStruct; + let serialized = to_ipld(unit_struct); + assert!(serialized.is_err()); +} + +#[test] +fn ipld_serializer_bool() { + let bool = false; + let ipld = Ipld::Bool(bool); + assert_serialized(bool, ipld); +} + +#[test] +fn ipld_serializer_u8() { + let integer = 34u8; + let ipld = Ipld::Integer(integer.into()); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_u16() { + let integer = 345u16; + let ipld = Ipld::Integer(integer.into()); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_u32() { + let integer = 345678u32; + let ipld = Ipld::Integer(integer.into()); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_u64() { + let integer = 34567890123u64; + let ipld = Ipld::Integer(integer.into()); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_i8() { + let integer = -23i8; 
+ let ipld = Ipld::Integer(integer.into()); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_i16() { + let integer = 2345i16; + let ipld = Ipld::Integer(integer.into()); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_i32() { + let integer = 234567i32; + let ipld = Ipld::Integer(integer.into()); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_i64() { + let integer = 2345678901i64; + let ipld = Ipld::Integer(integer.into()); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_i128() { + let integer = 34567890123467890123i128; + let ipld = Ipld::Integer(integer); + assert_serialized(integer, ipld); +} + +#[test] +fn ipld_serializer_f32() { + let float = 7.3f32; + let ipld = Ipld::Float(float.into()); + assert_serialized(float, ipld); +} + +#[test] +fn ipld_serializer_f64() { + let float = 427.8f64; + let ipld = Ipld::Float(float); + assert_serialized(float, ipld); +} + +#[test] +fn ipld_serializer_char() { + let char = 'x'; + let ipld = Ipld::String(char.to_string()); + assert_serialized(char, ipld); +} + +#[test] +fn ipld_serializer_str() { + let str: &str = "hello"; + let ipld = Ipld::String(str.to_string()); + assert_serialized(str, ipld); +} + +#[test] +fn ipld_serializer_bytes() { + let bytes = vec![0x68, 0x65, 0x6c, 0x6c, 0x6f]; + let ipld = Ipld::Bytes(bytes.clone()); + assert_serialized(ByteBuf::from(bytes), ipld); +} + +#[test] +fn ipld_serializer_list() { + let list = vec![0x68, 0x65, 0x6c, 0x6c, 0x6f]; + let ipld = Ipld::List(vec![ + Ipld::Integer(0x68), + Ipld::Integer(0x65), + Ipld::Integer(0x6c), + Ipld::Integer(0x6c), + Ipld::Integer(0x6f), + ]); + assert_serialized(list, ipld); +} + +#[test] +fn ipld_serializer_tuple() { + let tuple = (true, "hello".to_string()); + let ipld = Ipld::List(vec![Ipld::Bool(tuple.0), Ipld::String(tuple.1.clone())]); + assert_serialized(tuple, ipld); +} + +#[test] +fn ipld_serializer_tuple_struct() { + #[derive(Clone, Debug, Serialize, PartialEq)] + struct TupleStruct(u8, bool); + + let tuple_struct = TupleStruct(82, true); + let ipld = Ipld::List(vec![Ipld::Integer(82), Ipld::Bool(true)]); + assert_serialized(tuple_struct, ipld); +} + +#[test] +fn ipld_serializer_map() { + let map = BTreeMap::from([("hello".to_string(), true), ("world!".to_string(), false)]); + let ipld = Ipld::Map(BTreeMap::from([ + ("hello".to_string(), Ipld::Bool(true)), + ("world!".to_string(), Ipld::Bool(false)), + ])); + assert_serialized(map, ipld); +} + +/// A CID is deserialized through a newtype struct. +#[test] +fn ipld_serializer_cid() { + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let ipld = Ipld::Link(cid); + assert_serialized(cid, ipld); +} + +#[test] +fn ipld_serializer_newtype_struct() { + #[derive(Clone, Debug, Serialize, PartialEq)] + struct Wrapped(u8); + + let newtype_struct = Wrapped(3); + let ipld = Ipld::Integer(3); + assert_serialized(newtype_struct, ipld); +} + +/// An additional test, just to make sure that wrapped CIDs also work. 
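The serializer tests above and the deserializer tests added earlier in this diff each exercise one direction in isolation. Below is a minimal round-trip sketch, assuming the `to_ipld` helper imported by these tests and its `from_ipld` counterpart in `ipld_core::serde` (if the vendored crate exposes the latter under a different name, calling `Person::deserialize(ipld)` as the deserializer tests do works the same way); the `Person` struct is purely illustrative and not part of the vendored code:

```rust
use ipld_core::ipld::Ipld;
use ipld_core::serde::{from_ipld, to_ipld};
use serde_derive::{Deserialize, Serialize};

// Hypothetical example type, not part of the vendored tests.
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct Person {
    name: String,
    age: u8,
}

fn main() {
    let person = Person { name: "Alice".into(), age: 42 };

    // Serialize into the in-memory Ipld representation; structs become maps.
    let ipld = to_ipld(&person).unwrap();
    assert!(matches!(ipld, Ipld::Map(_)));

    // Deserialize straight back into the typed struct.
    let round_tripped: Person = from_ipld(ipld).unwrap();
    assert_eq!(round_tripped, person);
}
```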
+#[test] +fn ipld_serializer_newtype_struct_cid() { + #[derive(Clone, Debug, Serialize, PartialEq)] + struct Wrapped(Cid); + + let cid = Cid::try_from("bafkreie74tgmnxqwojhtumgh5dzfj46gi4mynlfr7dmm7duwzyvnpw7h7m").unwrap(); + let newtype_struct = Wrapped(cid); + let ipld = Ipld::Link(cid); + assert_serialized(newtype_struct, ipld); +} + +#[test] +fn ipld_serializer_option() { + let option_some: Option = Some(58u8); + let option_none: Option = None; + let ipld_some = Ipld::Integer(58); + let ipld_none = Ipld::Null; + assert_serialized(option_some, ipld_some); + assert_serialized(option_none, ipld_none); +} + +#[test] +fn ipld_serializer_enum() { + #[derive(Clone, Debug, Serialize, PartialEq)] + enum MyEnum { + One, + Two(u8), + Three { value: bool }, + } + + let enum_one = MyEnum::One; + let ipld_one = Ipld::String("One".into()); + assert_serialized(enum_one, ipld_one); + + let enum_two = MyEnum::Two(4); + let ipld_two = Ipld::Map(BTreeMap::from([("Two".into(), Ipld::Integer(4))])); + assert_serialized(enum_two, ipld_two); + + let enum_three = MyEnum::Three { value: true }; + let ipld_three = Ipld::Map(BTreeMap::from([( + "Three".into(), + Ipld::Map(BTreeMap::from([("value".into(), Ipld::Bool(true))])), + )])); + assert_serialized(enum_three, ipld_three); +} + +#[test] +fn ipld_serializer_struct() { + #[derive(Clone, Debug, Serialize, PartialEq)] + struct MyStruct { + hello: u8, + world: bool, + } + + let my_struct = MyStruct { + hello: 91, + world: false, + }; + let ipld = Ipld::Map(BTreeMap::from([ + ("hello".into(), Ipld::Integer(my_struct.hello.into())), + ("world".into(), Ipld::Bool(my_struct.world)), + ])); + assert_serialized(my_struct, ipld); +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.cargo_vcs_info.json deleted file mode 100644 index a462b06c7998..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "02c5c664a7e59f6ecf84e81276c4eb8bb65693d5" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.github/workflows/build.yml b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.github/workflows/build.yml deleted file mode 100644 index 49a31882466b..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.github/workflows/build.yml +++ /dev/null @@ -1,98 +0,0 @@ -name: build - -on: [push, pull_request] - -jobs: - build: - name: Build - strategy: - fail-fast: false - matrix: - platform: [ubuntu-latest, macos-latest, windows-latest] - toolchain: [stable] - runs-on: ${{ matrix.platform }} - - steps: - - name: Checkout Sources - uses: actions/checkout@v3 - - - name: Cache Dependencies & Build Outputs - uses: actions/cache@v3 - with: - path: ~/.cargo - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - - - name: Install Rust Toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: ${{ matrix.toolchain }} - override: true - components: rustfmt, clippy - - - name: Check Code Format - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check - - - name: Code Lint - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all-targets --all-features --workspace -- -D warnings - - - name: Code Lint Without Default Features - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --no-default-features --workspace -- 
-D warnings - - - name: Test - uses: actions-rs/cargo@v1 - with: - command: test - args: --all-features --workspace - - build-no-std: - name: Build no_std - runs-on: ubuntu-latest - steps: - - name: Checkout Sources - uses: actions/checkout@v3 - - - name: Install Rust Toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - target: thumbv6m-none-eabi - - - name: Build - uses: actions-rs/cargo@v1 - with: - command: build - args: --no-default-features --workspace --target thumbv6m-none-eabi - - coverage: - name: Code Coverage - runs-on: ubuntu-latest - steps: - - name: Checkout Sources - uses: actions/checkout@v3 - - - name: Install Rust Toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - - - name: Generate Code Coverage - uses: actions-rs/tarpaulin@v0.1 - with: - args: --all-features - - - name: Upload Code Coverage - uses: codecov/codecov-action@v3 diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/CHANGELOG.md deleted file mode 100644 index 4628fe4313e6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/CHANGELOG.md +++ /dev/null @@ -1,29 +0,0 @@ -# [v0.18.1](https://github.com/multiformats/rust-multihash/compare/v0.18.0...v0.18.1) (2023-04-14) - - -### Bug Fixes - -* don't panic on non minimal varints ([#293](https://github.com/multiformats/rust-multihash/issues/293)) ([c3445fc](https://github.com/multiformats/rust-multihash/commit/c3445fc5041b0fc573945321ebd4b0cdffe0daa5)), closes [#282](https://github.com/multiformats/rust-multihash/issues/282) - - -# [v0.18.0](https://github.com/multiformats/rust-multihash/compare/v0.17.0...v) (2022-12-06) - - -### Bug Fixes - -* remove Nix support ([#254](https://github.com/multiformats/rust-multihash/issues/254)) ([ebf57dd](https://github.com/multiformats/rust-multihash/commit/ebf57ddb82be2d2fd0a2f00666b0f888d4c78e1b)), closes [#247](https://github.com/multiformats/rust-multihash/issues/247) -* update to Rust edition 2021 ([#255](https://github.com/multiformats/rust-multihash/issues/255)) ([da53376](https://github.com/multiformats/rust-multihash/commit/da53376e0d9cf2d82d6c0d10590a77991cb3a6b6)) - - -### Features - -* add `encoded_len` and bytes written ([#252](https://github.com/multiformats/rust-multihash/issues/252)) ([b3cc43e](https://github.com/multiformats/rust-multihash/commit/b3cc43ecb6f9c59da774b094853d6542430d55ad)) - - -### BREAKING CHANGES - -* update to Rust edition 2021 -* `Multihash::write()` returns bytes written - - Prior to this change it returned an empty tuple `()`, now it returns -the bytes written. diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/Cargo.toml deleted file mode 100644 index aab0696a0fe4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/Cargo.toml +++ /dev/null @@ -1,192 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
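The removed CHANGELOG above records one breaking change worth calling out: since 0.18, `Multihash::write()` returns the number of bytes written rather than `()`. The sketch below shows how a caller can use that return value, mirroring the `roundtrip` test in the deleted `src/multihash.rs` later in this diff; it assumes the 0.18-era `Code`/`MultihashDigest` API that this change removes:

```rust
use multihash::{Code, MultihashDigest};

fn main() {
    let hash = Code::Sha2_256.digest(b"hello world");

    let mut buf: Vec<u8> = Vec::new();
    // Since 0.18, `write` reports how many bytes it emitted...
    let written = hash.write(&mut buf).unwrap();

    // ...which matches both the prediction from `encoded_len` and the buffer length.
    assert_eq!(written, hash.encoded_len());
    assert_eq!(written, buf.len());
}
```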
- -[package] -edition = "2021" -rust-version = "1.59" -name = "multihash" -version = "0.18.1" -authors = [ - "dignifiedquire ", - "David Craven ", - "Volker Mische ", -] -description = "Implementation of the multihash format" -documentation = "https://docs.rs/multihash/" -readme = "README.md" -keywords = [ - "multihash", - "ipfs", -] -license = "MIT" -repository = "https://github.com/multiformats/rust-multihash" - -[[bench]] -name = "multihash" -harness = false - -[dependencies.arbitrary] -version = "1.1.0" -optional = true - -[dependencies.blake2b_simd] -version = "1.0.0" -optional = true -default-features = false - -[dependencies.blake2s_simd] -version = "1.0.0" -optional = true -default-features = false - -[dependencies.blake3] -version = "1.2.0" -optional = true -default-features = false - -[dependencies.core2] -version = "0.4.0" -default-features = false - -[dependencies.digest] -version = "0.10.1" -optional = true -default-features = false - -[dependencies.multihash-derive] -version = "0.8.0" -optional = true -default-features = false - -[dependencies.parity-scale-codec] -version = "3.0.0" -features = ["derive"] -optional = true -default-features = false - -[dependencies.quickcheck] -version = "1.0.3" -optional = true - -[dependencies.rand] -version = "0.8.5" -features = ["small_rng"] -optional = true - -[dependencies.ripemd-rs] -version = "0.1.1" -optional = true -package = "ripemd" - -[dependencies.serde] -version = "1.0.116" -features = ["derive"] -optional = true -default-features = false - -[dependencies.serde-big-array] -version = "0.3.2" -features = ["const-generics"] -optional = true - -[dependencies.sha-1] -version = "0.10.0" -optional = true -default-features = false - -[dependencies.sha-2] -version = "0.10.0" -optional = true -default-features = false -package = "sha2" - -[dependencies.sha-3] -version = "0.10.0" -optional = true -default-features = false -package = "sha3" - -[dependencies.strobe-rs] -version = "0.7.0" -optional = true -default-features = false - -[dependencies.unsigned-varint] -version = "0.7.1" -default-features = false - -[dev-dependencies.arbitrary] -version = "1.1.0" - -[dev-dependencies.criterion] -version = "0.3.3" - -[dev-dependencies.hex] -version = "0.4.2" - -[dev-dependencies.quickcheck] -version = "1.0.3" - -[dev-dependencies.rand] -version = "0.8.5" - -[dev-dependencies.serde_json] -version = "1.0.58" - -[features] -alloc = ["core2/alloc"] -arb = [ - "quickcheck", - "rand", - "arbitrary", -] -blake2b = ["blake2b_simd"] -blake2s = ["blake2s_simd"] -default = [ - "std", - "derive", - "multihash-impl", - "secure-hashes", -] -derive = ["multihash-derive"] -identity = [] -multihash-impl = ["derive"] -ripemd = ["ripemd-rs"] -scale-codec = ["parity-scale-codec"] -secure-hashes = [ - "blake2b", - "blake2s", - "blake3", - "sha2", - "sha3", -] -serde-codec = [ - "serde", - "serde-big-array", -] -sha1 = [ - "digest", - "sha-1", -] -sha2 = [ - "digest", - "sha-2", -] -sha3 = [ - "digest", - "sha-3", -] -std = [ - "unsigned-varint/std", - "multihash-derive/std", - "alloc", -] -strobe = ["strobe-rs"] diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/Cargo.toml.orig deleted file mode 100644 index d53f6185e461..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/Cargo.toml.orig +++ /dev/null @@ -1,70 +0,0 @@ -[workspace] -members = ["derive", "."] - -[package] -name = "multihash" -description = "Implementation of the multihash format" 
-repository = "https://github.com/multiformats/rust-multihash" -keywords = ["multihash", "ipfs"] -version = "0.18.1" -authors = ["dignifiedquire ", "David Craven ", "Volker Mische "] -license = "MIT" -readme = "README.md" -documentation = "https://docs.rs/multihash/" -edition = "2021" -rust-version = "1.59" - -[features] -default = ["std", "derive", "multihash-impl", "secure-hashes"] -std = ["unsigned-varint/std", "multihash-derive/std", "alloc"] -alloc = ["core2/alloc"] -multihash-impl = ["derive"] -derive = ["multihash-derive"] -arb = ["quickcheck", "rand", "arbitrary"] -secure-hashes = ["blake2b", "blake2s", "blake3", "sha2", "sha3"] -scale-codec = ["parity-scale-codec"] -serde-codec = ["serde", "serde-big-array"] - -blake2b = ["blake2b_simd"] -blake2s = ["blake2s_simd"] -identity = [] -sha1 = ["digest", "sha-1"] -sha2 = ["digest", "sha-2"] -sha3 = ["digest", "sha-3"] -strobe = ["strobe-rs"] -ripemd = ["ripemd-rs"] - -[dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive"], optional = true } -quickcheck = { version = "1.0.3", optional = true } -rand = { version = "0.8.5", optional = true, features = ["small_rng"] } -serde = { version = "1.0.116", optional = true, default-features = false, features = ["derive"] } -serde-big-array = { version = "0.3.2", optional = true, features = ["const-generics"] } -multihash-derive = { version = "0.8.0", path = "derive", default-features = false, optional = true } -unsigned-varint = { version = "0.7.1", default-features = false } -arbitrary = {version = "1.1.0", optional = true } - -blake2b_simd = { version = "1.0.0", default-features = false, optional = true } -blake2s_simd = { version = "1.0.0", default-features = false, optional = true } -blake3 = { version = "1.2.0", default-features = false, optional = true } -digest = { version = "0.10.1", default-features = false, optional = true } -sha-1 = { version = "0.10.0", default-features = false, optional = true } -sha-2 = { version = "0.10.0", default-features = false, optional = true, package = "sha2" } -sha-3 = { version = "0.10.0", default-features = false, optional = true, package = "sha3" } -strobe-rs = { version = "0.7.0", default-features = false, optional = true } -ripemd-rs = { package = "ripemd", version = "0.1.1", optional = true} - -core2 = { version = "0.4.0", default-features = false } - -[dev-dependencies] -criterion = "0.3.3" -hex = "0.4.2" -serde_json = "1.0.58" -quickcheck = "1.0.3" -rand = "0.8.5" -arbitrary = "1.1.0" -multihash = { path = ".", features = ["sha1", "strobe"] } - -[[bench]] -name = "multihash" -harness = false diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/README.md b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/README.md deleted file mode 100644 index d0167806ca81..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/README.md +++ /dev/null @@ -1,97 +0,0 @@ -# rust-multihash - -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) -[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) -[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) - -[![Build 
Status](https://github.com/multiformats/rust-multihash/workflows/build/badge.svg)](https://github.com/multiformats/rust-multihash/actions) -[![Crates.io](https://img.shields.io/crates/v/multihash?style=flat-square)](https://crates.io/crates/multihash) -[![License](https://img.shields.io/crates/l/multihash?style=flat-square)](LICENSE) -[![Documentation](https://docs.rs/multihash/badge.svg?style=flat-square)](https://docs.rs/multihash) -[![Dependency Status](https://deps.rs/repo/github/multiformats/rust-multihash/status.svg)](https://deps.rs/repo/github/multiformats/rust-multihash) -[![Coverage Status]( https://img.shields.io/codecov/c/github/multiformats/rust-multihash?style=flat-square)](https://codecov.io/gh/multiformats/rust-multihash) - -> [multihash](https://github.com/multiformats/multihash) implementation in Rust. - -## Table of Contents - - [Install](#install) - - [Usage](#usage) - - [Supported Hash Types](#supported-hash-types) - - [Maintainers](#maintainers) - - [Contribute](#contribute) - - [License](#license) - -## Install - -First add this to your `Cargo.toml` - -```toml -[dependencies] -multihash = "*" -``` - -Then run `cargo build`. - -MSRV 1.51.0 due to use of const generics - -## Usage - -```rust -use multihash::{Code, MultihashDigest}; - -fn main() { - let hash = Code::Sha2_256.digest(b"my hash"); - println!("{:?}", hash); -} -``` - -### Using a custom code table - -You can derive your own application specific code table: - -```rust -use multihash::derive::Multihash; -use multihash::MultihashCode; - -#[derive(Clone, Copy, Debug, Eq, Multihash, PartialEq)] -#[mh(alloc_size = 64)] -pub enum Code { - #[mh(code = 0x01, hasher = multihash::Sha2_256)] - Foo, - #[mh(code = 0x02, hasher = multihash::Sha2_512)] - Bar, -} - -fn main() { - let hash = Code::Foo.digest(b"my hash"); - println!("{:02x?}", hash); -} -``` - -## Supported Hash Types - -* `SHA1` -* `SHA2-256` -* `SHA2-512` -* `SHA3`/`Keccak` -* `Blake2b-256`/`Blake2b-512`/`Blake2s-128`/`Blake2s-256` -* `Blake3` -* `Strobe` - -## Maintainers - -Captain: [@dignifiedquire](https://github.com/dignifiedquire). - -## Contribute - -Contributions welcome. Please check out [the issues](https://github.com/multiformats/rust-multihash/issues). - -Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). - -Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. - - -## License - -[MIT](LICENSE) diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/RELEASE.md b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/RELEASE.md deleted file mode 100644 index e5284a2b6d24..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/RELEASE.md +++ /dev/null @@ -1,48 +0,0 @@ -Release process -=============== - -Generating Changelog --------------------- - -Install dependencies - -```sh -$ npm install -g conventional-changelog-cli -$ cd rust-multihash -$ conventional-changelog --preset angular -``` - -Add the output of that to `CHANGELOG.md`, and write a human-centric summary of changes. 
-Update the linked output to reference the new version, which conventional-changelog doesn't know about: - -```md -# [](https://github.com/multiformats/rust-multihash/compare/v0.17.0...v) (2022-12-06) -``` -becomes: -```md -# [v0.18.0](https://github.com/multiformats/rust-multihash/compare/v0.17.0...v0.18.0) (2022-12-06) -``` - -## Publishing - -Publishing on crates.io, bumping version & generating tags is done using [`cargo-release`](https://github.com/crate-ci/cargo-release). - -This requires the following permissions - -- on github.com/multiformats/rust-multihash - - creating tags - - pushing to `master` -- on crates.io - - publish access to all published crates - -Dry run - -```sh -$ cargo release -``` - -Actual publishing - -```sh -$ cargo release --execute -``` diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/benches/multihash.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/benches/multihash.rs deleted file mode 100644 index 0b75ea15b919..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/benches/multihash.rs +++ /dev/null @@ -1,98 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use rand::Rng; - -use multihash::{ - Blake2b256, Blake2b512, Blake2s128, Blake2s256, Blake3_256, Hasher, Keccak224, Keccak256, - Keccak384, Keccak512, Sha1, Sha2_256, Sha2_512, Sha3_224, Sha3_256, Sha3_384, Sha3_512, - Strobe256, Strobe512, -}; - -macro_rules! group_digest { - ($criterion:ident, $( $id:expr => $hash:ident, $input:expr)* ) => {{ - let mut group = $criterion.benchmark_group("digest"); - $( - group.bench_function($id, |b| { - b.iter(|| { - let mut hasher = $hash::default(); - hasher.update(black_box($input)); - let _ = black_box(hasher.finalize()); - }) - }); - )* - group.finish(); - }}; -} - -macro_rules! group_stream { - ($criterion:ident, $( $id:expr => $hash:ident, $input:expr)* ) => {{ - let mut group = $criterion.benchmark_group("stream"); - $( - group.bench_function($id, |b| { - b.iter(|| { - let input = black_box($input); - let mut hasher = <$hash>::default(); - for i in 0..3 { - let start = i * 256; - hasher.update(&input[start..(start + 256)]); - } - let _ = black_box(hasher.finalize()); - }) - }); - )* - group.finish(); - }}; -} - -fn bench_digest(c: &mut Criterion) { - let mut rng = rand::thread_rng(); - let data: Vec = (0..1024).map(|_| rng.gen()).collect(); - group_digest!(c, - "sha1" => Sha1, &data - "sha2_256" => Sha2_256, &data - "sha2_512" => Sha2_512, &data - "sha3_224" => Sha3_224, &data - "sha3_256" => Sha3_256, &data - "sha3_384" => Sha3_384, &data - "sha3_512" => Sha3_512, &data - "keccak_224" => Keccak224, &data - "keccak_256" => Keccak256, &data - "keccak_384" => Keccak384, &data - "keccak_512" => Keccak512, &data - "blake2b_256" => Blake2b256, &data - "blake2b_512" => Blake2b512, &data - "blake2s_128" => Blake2s128, &data - "blake2s_256" => Blake2s256, &data - "blake3_256" => Blake3_256, &data - "strobe_256" => Strobe256, &data - "strobe_512" => Strobe512, &data - ); -} - -/// Chunks the data into 256-byte slices. 
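The deleted streaming benchmark below feeds each hasher its input in 256-byte chunks. Here is a standalone sketch of that incremental pattern using the crate's `Hasher` trait (again the 0.18 API being removed here); the buffer size and contents are arbitrary:

```rust
use multihash::{Hasher, Sha2_256};

fn main() {
    let data = vec![0u8; 768];

    // One-shot digest over the full buffer, for comparison.
    let mut oneshot = Sha2_256::default();
    oneshot.update(&data);

    // Incremental digest over 256-byte chunks, as the benchmark does.
    let mut streaming = Sha2_256::default();
    for chunk in data.chunks(256) {
        streaming.update(chunk);
    }

    assert_eq!(oneshot.finalize(), streaming.finalize());
}
```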
-fn bench_stream(c: &mut Criterion) { - let mut rng = rand::thread_rng(); - let data: Vec = (0..1024).map(|_| rng.gen()).collect(); - group_stream!(c, - "sha1" => Sha1, &data - "sha2_256" => Sha2_256, &data - "sha2_512" => Sha2_512, &data - "sha3_224" => Sha3_224, &data - "sha3_256" => Sha3_256, &data - "sha3_384" => Sha3_384, &data - "sha3_512" => Sha3_512, &data - "keccak_224" => Keccak224, &data - "keccak_256" => Keccak256, &data - "keccak_384" => Keccak384, &data - "keccak_512" => Keccak512, &data - "blake2b_256" => Blake2b256, &data - "blake2b_512" => Blake2b512, &data - "blake2s_128" => Blake2s128, &data - "blake2s_256" => Blake2s256, &data - "blake3_256" => Blake3_256, &data - "strobe_256" => Strobe256, &data - "strobe_512" => Strobe512, &data - ); -} - -criterion_group!(benches, bench_digest, bench_stream); -criterion_main!(benches); diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/arb.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/arb.rs deleted file mode 100644 index 2768a7786e10..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/arb.rs +++ /dev/null @@ -1,83 +0,0 @@ -use quickcheck::Gen; -use rand::{ - distributions::{weighted::WeightedIndex, Distribution}, - Rng, RngCore, SeedableRng, -}; - -use arbitrary::{size_hint, Unstructured}; - -use crate::MultihashGeneric; - -/// Generates a random valid multihash. -impl quickcheck::Arbitrary for MultihashGeneric { - fn arbitrary(g: &mut Gen) -> MultihashGeneric { - // In real world lower multihash codes are more likely to happen, hence distribute them - // with bias towards smaller values. - let weights = [128, 64, 32, 16, 8, 4, 2, 1]; - let dist = WeightedIndex::new(weights.iter()).unwrap(); - let mut rng = rand::rngs::SmallRng::seed_from_u64(u64::arbitrary(g)); - let code = match dist.sample(&mut rng) { - 0 => rng.gen_range(0..u64::pow(2, 7)), - 1 => rng.gen_range(u64::pow(2, 7)..u64::pow(2, 14)), - 2 => rng.gen_range(u64::pow(2, 14)..u64::pow(2, 21)), - 3 => rng.gen_range(u64::pow(2, 21)..u64::pow(2, 28)), - 4 => rng.gen_range(u64::pow(2, 28)..u64::pow(2, 35)), - 5 => rng.gen_range(u64::pow(2, 35)..u64::pow(2, 42)), - 6 => rng.gen_range(u64::pow(2, 42)..u64::pow(2, 49)), - 7 => rng.gen_range(u64::pow(2, 56)..u64::pow(2, 63)), - _ => unreachable!(), - }; - - // Maximum size is S byte due to the generic. - let size = rng.gen_range(0..S); - let mut data = [0; S]; - rng.fill_bytes(&mut data); - MultihashGeneric::wrap(code, &data[..size]).unwrap() - } -} - -impl<'a, const S: usize> arbitrary::Arbitrary<'a> for MultihashGeneric { - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - let mut code = 0u64; - let mut len_choice = u.arbitrary::()? 
| 1; - - while len_choice & 1 == 1 { - len_choice >>= 1; - - let x = u.arbitrary::(); - let next = code - .checked_shl(8) - .zip(x.ok()) - .map(|(next, x)| next.saturating_add(x as u64)); - - match next { - None => break, - Some(next) => code = next, - } - } - - let size = u.int_in_range(0..=S)?; - let data = u.bytes(size)?; - - Ok(MultihashGeneric::wrap(code, data).unwrap()) - } - - fn size_hint(depth: usize) -> (usize, Option) { - size_hint::and(<[u8; 3]>::size_hint(depth), (0, Some(S + 8))) - } -} - -#[cfg(test)] -mod tests { - use crate::MultihashGeneric; - use arbitrary::{Arbitrary, Unstructured}; - - #[test] - fn arbitrary() { - let mut u = Unstructured::new(&[2, 4, 13, 5, 6, 7, 8, 9, 6]); - - let mh = as Arbitrary>::arbitrary(&mut u).unwrap(); - let mh2 = MultihashGeneric::<16>::wrap(1037, &[6, 7, 8, 9, 6]).unwrap(); - assert_eq!(mh, mh2); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/error.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/error.rs deleted file mode 100644 index df759e0e99bf..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/error.rs +++ /dev/null @@ -1,54 +0,0 @@ -#[cfg(not(feature = "std"))] -use core2::{error::Error as StdError, io::Error as IoError}; -#[cfg(feature = "std")] -use std::{error::Error as StdError, io::Error as IoError}; - -use unsigned_varint::decode::Error as DecodeError; -#[cfg(feature = "std")] -use unsigned_varint::io::ReadError; - -/// Multihash error. -#[derive(Debug)] -pub enum Error { - /// Io error. - Io(IoError), - /// Unsupported multihash code. - UnsupportedCode(u64), - /// Invalid multihash size. - InvalidSize(u64), - /// Invalid varint. - Varint(DecodeError), -} - -impl core::fmt::Display for Error { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - match self { - Self::Io(err) => write!(f, "{}", err), - Self::UnsupportedCode(code) => write!(f, "Unsupported multihash code {}.", code), - Self::InvalidSize(size) => write!(f, "Invalid multihash size {}.", size), - Self::Varint(err) => write!(f, "{}", err), - } - } -} - -impl StdError for Error {} - -impl From for Error { - fn from(err: IoError) -> Self { - Self::Io(err) - } -} - -#[cfg(feature = "std")] -impl From for Error { - fn from(err: ReadError) -> Self { - match err { - ReadError::Io(err) => Self::Io(err), - ReadError::Decode(err) => Self::Varint(err), - _ => unreachable!(), - } - } -} - -/// Multihash result. -pub type Result = core::result::Result; diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/hasher_impl.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/hasher_impl.rs deleted file mode 100644 index 4e24439ae2ca..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/hasher_impl.rs +++ /dev/null @@ -1,336 +0,0 @@ -use crate::hasher::Hasher; - -#[cfg(feature = "std")] -use std::io; - -#[cfg(not(feature = "std"))] -use core2::io; - -macro_rules! derive_write { - ($name:ident) => { - impl io::Write for $name { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.update(buf); - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - } - }; -} - -#[cfg(any(feature = "blake2b", feature = "blake2s"))] -macro_rules! derive_hasher_blake { - ($module:ident, $name:ident) => { - /// Multihash hasher. 
- #[derive(Debug)] - pub struct $name { - state: $module::State, - digest: [u8; S], - } - - impl Default for $name { - fn default() -> Self { - let mut params = $module::Params::new(); - params.hash_length(S); - Self { - state: params.to_state(), - digest: [0; S], - } - } - } - - impl Hasher for $name { - fn update(&mut self, input: &[u8]) { - self.state.update(input); - } - - fn finalize(&mut self) -> &[u8] { - let digest = self.state.finalize(); - let digest_bytes = digest.as_bytes(); - let digest_out = &mut self.digest[..digest_bytes.len().max(S)]; - digest_out.copy_from_slice(digest_bytes); - digest_out - } - - fn reset(&mut self) { - let Self { state, .. } = Self::default(); - self.state = state; - } - } - - derive_write!($name); - }; -} - -#[cfg(feature = "blake2b")] -pub mod blake2b { - use super::*; - - derive_hasher_blake!(blake2b_simd, Blake2bHasher); - - /// 256 bit blake2b hasher. - pub type Blake2b256 = Blake2bHasher<32>; - - /// 512 bit blake2b hasher. - pub type Blake2b512 = Blake2bHasher<64>; -} - -#[cfg(feature = "blake2s")] -pub mod blake2s { - use super::*; - - derive_hasher_blake!(blake2s_simd, Blake2sHasher); - - /// 256 bit blake2b hasher. - pub type Blake2s128 = Blake2sHasher<16>; - - /// 512 bit blake2b hasher. - pub type Blake2s256 = Blake2sHasher<32>; -} - -#[cfg(feature = "blake3")] -pub mod blake3 { - use super::*; - - /// Multihash hasher. - #[derive(Debug)] - pub struct Blake3Hasher { - hasher: ::blake3::Hasher, - digest: [u8; S], - } - - impl Blake3Hasher { - /// using blake3's XOF function, fills the given slice with hash output - pub fn finalize_xof_fill(&mut self, digest_out: &mut [u8]) { - let mut digest = self.hasher.finalize_xof(); - digest.fill(digest_out) - } - } - - impl Default for Blake3Hasher { - fn default() -> Self { - let hasher = ::blake3::Hasher::new(); - - Self { - hasher, - digest: [0; S], - } - } - } - - impl Hasher for Blake3Hasher { - fn update(&mut self, input: &[u8]) { - self.hasher.update(input); - } - - fn finalize(&mut self) -> &[u8] { - let mut output = self.hasher.finalize_xof(); - output.fill(&mut self.digest); - &self.digest - } - - fn reset(&mut self) { - self.hasher.reset(); - } - } - - derive_write!(Blake3Hasher); - - /// blake3-256 hasher. - pub type Blake3_256 = Blake3Hasher<32>; -} - -#[cfg(feature = "digest")] -macro_rules! derive_rustcrypto_hasher { - ($module:ty, $name:ident, $size:expr) => { - /// Multihash hasher. 
- #[derive(Debug)] - pub struct $name { - state: $module, - digest: [u8; $size], - } - - impl Default for $name { - fn default() -> Self { - $name { - state: Default::default(), - digest: [0; $size], - } - } - } - - impl $crate::hasher::Hasher for $name { - fn update(&mut self, input: &[u8]) { - use digest::Digest; - self.state.update(input) - } - - fn finalize(&mut self) -> &[u8] { - use digest::Digest; - let digest = self.state.clone().finalize(); - let digest_bytes = digest.as_slice(); - let digest_out = &mut self.digest[..digest_bytes.len().max($size)]; - digest_out.copy_from_slice(digest_bytes); - digest_out - } - - fn reset(&mut self) { - use digest::Digest; - self.state.reset(); - } - } - - impl io::Write for $name { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.update(buf); - Ok(buf.len()) - } - - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - } - }; -} - -#[cfg(feature = "sha1")] -pub mod sha1 { - use super::*; - - derive_rustcrypto_hasher!(::sha1::Sha1, Sha1, 20); -} - -#[cfg(feature = "sha2")] -pub mod sha2 { - use super::*; - - derive_rustcrypto_hasher!(sha_2::Sha256, Sha2_256, 32); - derive_rustcrypto_hasher!(sha_2::Sha512, Sha2_512, 64); -} - -#[cfg(feature = "sha3")] -pub mod sha3 { - use super::*; - - derive_rustcrypto_hasher!(sha_3::Sha3_224, Sha3_224, 28); - derive_rustcrypto_hasher!(sha_3::Sha3_256, Sha3_256, 32); - derive_rustcrypto_hasher!(sha_3::Sha3_384, Sha3_384, 48); - derive_rustcrypto_hasher!(sha_3::Sha3_512, Sha3_512, 64); - - derive_rustcrypto_hasher!(sha_3::Keccak224, Keccak224, 28); - derive_rustcrypto_hasher!(sha_3::Keccak256, Keccak256, 32); - derive_rustcrypto_hasher!(sha_3::Keccak384, Keccak384, 48); - derive_rustcrypto_hasher!(sha_3::Keccak512, Keccak512, 64); -} - -#[cfg(feature = "ripemd")] -pub mod ripemd { - - use super::*; - - derive_rustcrypto_hasher!(ripemd_rs::Ripemd160, Ripemd160, 20); - derive_rustcrypto_hasher!(ripemd_rs::Ripemd256, Ripemd256, 32); - derive_rustcrypto_hasher!(ripemd_rs::Ripemd320, Ripemd320, 40); -} - -pub mod identity { - use super::*; - - /// Identity hasher with a maximum size. - /// - /// # Panics - /// - /// Panics if the input is bigger than the maximum size. - #[derive(Debug)] - pub struct IdentityHasher { - i: usize, - bytes: [u8; S], - } - - impl Default for IdentityHasher { - fn default() -> Self { - Self { - i: 0, - bytes: [0u8; S], - } - } - } - - impl Hasher for IdentityHasher { - fn update(&mut self, input: &[u8]) { - let start = self.i.min(self.bytes.len()); - let end = (self.i + input.len()).min(self.bytes.len()); - self.bytes[start..end].copy_from_slice(input); - self.i = end; - } - - fn finalize(&mut self) -> &[u8] { - &self.bytes[..self.i] - } - - fn reset(&mut self) { - self.i = 0 - } - } - - derive_write!(IdentityHasher); - - /// 32 byte Identity hasher (constrained to 32 bytes). - /// - /// # Panics - /// - /// Panics if the input is bigger than 32 bytes. - pub type Identity256 = IdentityHasher<32>; -} - -#[cfg(feature = "strobe")] -pub mod strobe { - use super::*; - use strobe_rs::{SecParam, Strobe}; - - /// Strobe hasher. 
- pub struct StrobeHasher { - strobe: Strobe, - initialized: bool, - digest: [u8; S], - } - - impl Default for StrobeHasher { - fn default() -> Self { - Self { - strobe: Strobe::new(b"StrobeHash", SecParam::B128), - initialized: false, - digest: [0; S], - } - } - } - - impl Hasher for StrobeHasher { - fn update(&mut self, input: &[u8]) { - self.strobe.ad(input, self.initialized); - self.initialized = true; - } - - fn finalize(&mut self) -> &[u8] { - self.strobe.clone().prf(&mut self.digest, false); - &self.digest - } - - fn reset(&mut self) { - let Self { strobe, .. } = Self::default(); - self.strobe = strobe; - self.initialized = false; - } - } - - derive_write!(StrobeHasher); - - /// 256 bit strobe hasher. - pub type Strobe256 = StrobeHasher<32>; - - /// 512 bit strobe hasher. - pub type Strobe512 = StrobeHasher<64>; -} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/lib.rs deleted file mode 100644 index d561c27b58d9..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/lib.rs +++ /dev/null @@ -1,94 +0,0 @@ -//! Multihash implementation. -//! -//! Feature Flags -//! ------------- -//! -//! Multihash has lots of [feature flags], by default a table with cryptographically secure hashers -//! is created. -//! -//! Some of the features are about specific hash functions, these are ("default" marks the hashers -//! that are enabled by default): -//! -//! - `blake2b`: (default) Enable Blake2b hashers -//! - `blake2s`: (default) Enable Blake2s hashers -//! - `identity`: Enable the Identity hashers (using it is discouraged as it's not a hash function -//! in the sense that it produces a fixed sized output independent of the input size) -//! - `sha1`: Enable SHA-1 hasher -//! - `sha2`: (default) Enable SHA-2 hashers -//! - `sha3`: (default) Enable SHA-3 hashers -//! - `strobe`: Enable Strobe hashers -//! -//! In order to enable all cryptographically secure hashers, you can set the `secure-hashes` -//! feature flag (enabled by default). -//! -//! The library has support for `no_std`, if you disable the `std` feature flag. -//! -//! The `multihash-impl` feature flag (enabled by default) enables a default Multihash -//! implementation that contains some of the bundled hashers. If you want a different set of hash -//! algorithms you can change this with enabled the corresponding features. -//! -//! For example if you only need SHA2 hasher, you could set the features in the `multihash` -//! dependency like this: -//! -//! ```toml -//! multihash = { version = …, default-features = false, features = ["std", "multihash-impl", "sha2"] } -//! ``` -//! -//! If you want to customize your code table even more, for example you want only one specific hash -//! digest size and not whole family, you would only enable the `derive` feature (enabled by -//! default), which enables the [`Multihash` derive], together with the hashers you want. -//! -//! The `arb` feature flag enables the quickcheck arbitrary implementation for property based -//! testing. -//! -//! For serializing the multihash there is support for [Serde] via the `serde-codec` feature and -//! the [SCALE Codec] via the `scale-codec` feature. -//! -//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section -//! [`Multihash` derive]: crate::derive -//! [Serde]: https://serde.rs -//! 
[SCALE Codec]: https://github.com/paritytech/parity-scale-codec - -#![deny(missing_docs, unsafe_code)] -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(feature = "alloc")] -extern crate alloc; - -#[cfg(any(test, feature = "arb"))] -mod arb; -mod error; -mod hasher; -mod hasher_impl; -mod multihash; -#[cfg(feature = "multihash-impl")] -mod multihash_impl; - -pub use crate::error::{Error, Result}; -pub use crate::hasher::Hasher; -pub use crate::multihash::{Multihash as MultihashGeneric, MultihashDigest}; -#[cfg(feature = "derive")] -pub use multihash_derive as derive; - -#[cfg(feature = "multihash-impl")] -pub use crate::multihash_impl::{Code, Multihash}; - -#[cfg(feature = "blake2b")] -pub use crate::hasher_impl::blake2b::{Blake2b256, Blake2b512, Blake2bHasher}; -#[cfg(feature = "blake2s")] -pub use crate::hasher_impl::blake2s::{Blake2s128, Blake2s256, Blake2sHasher}; -#[cfg(feature = "blake3")] -pub use crate::hasher_impl::blake3::{Blake3Hasher, Blake3_256}; -pub use crate::hasher_impl::identity::{Identity256, IdentityHasher}; -#[cfg(feature = "ripemd")] -pub use crate::hasher_impl::ripemd::{Ripemd160, Ripemd256, Ripemd320}; -#[cfg(feature = "sha1")] -pub use crate::hasher_impl::sha1::Sha1; -#[cfg(feature = "sha2")] -pub use crate::hasher_impl::sha2::{Sha2_256, Sha2_512}; -#[cfg(feature = "sha3")] -pub use crate::hasher_impl::sha3::{Keccak224, Keccak256, Keccak384, Keccak512}; -#[cfg(feature = "sha3")] -pub use crate::hasher_impl::sha3::{Sha3_224, Sha3_256, Sha3_384, Sha3_512}; -#[cfg(feature = "strobe")] -pub use crate::hasher_impl::strobe::{Strobe256, Strobe512, StrobeHasher}; diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/multihash.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/multihash.rs deleted file mode 100644 index c078baab3440..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/multihash.rs +++ /dev/null @@ -1,466 +0,0 @@ -use crate::Error; -#[cfg(feature = "alloc")] -use alloc::vec::Vec; -use core::convert::TryFrom; - -use core::convert::TryInto; -use core::fmt::Debug; -#[cfg(feature = "serde-codec")] -use serde_big_array::BigArray; - -use unsigned_varint::encode as varint_encode; - -#[cfg(feature = "std")] -use std::io; - -#[cfg(not(feature = "std"))] -use core2::io; - -/// Trait that implements hashing. -/// -/// It is usually implemented by a custom code table enum that derives the [`Multihash` derive]. -/// -/// [`Multihash` derive]: crate::derive -pub trait MultihashDigest: - TryFrom + Into + Send + Sync + Unpin + Copy + Eq + Debug + 'static -{ - /// Calculate the hash of some input data. - /// - /// # Example - /// - /// ``` - /// // `Code` implements `MultihashDigest` - /// use multihash::{Code, MultihashDigest}; - /// - /// let hash = Code::Sha3_256.digest(b"Hello world!"); - /// println!("{:02x?}", hash); - /// ``` - fn digest(&self, input: &[u8]) -> Multihash; - - /// Create a multihash from an existing multihash digest. - /// - /// # Example - /// - /// ``` - /// use multihash::{Code, Hasher, MultihashDigest, Sha3_256}; - /// - /// let mut hasher = Sha3_256::default(); - /// hasher.update(b"Hello world!"); - /// let hash = Code::Sha3_256.wrap(&hasher.finalize()).unwrap(); - /// println!("{:02x?}", hash); - /// ``` - fn wrap(&self, digest: &[u8]) -> Result, Error>; -} - -/// A Multihash instance that only supports the basic functionality and no hashing. 
-/// -/// With this Multihash implementation you can operate on Multihashes in a generic way, but -/// no hasher implementation is associated with the code. -/// -/// # Example -/// -/// ``` -/// use multihash::Multihash; -/// -/// const Sha3_256: u64 = 0x16; -/// let digest_bytes = [ -/// 0x16, 0x20, 0x64, 0x4b, 0xcc, 0x7e, 0x56, 0x43, 0x73, 0x04, 0x09, 0x99, 0xaa, 0xc8, 0x9e, -/// 0x76, 0x22, 0xf3, 0xca, 0x71, 0xfb, 0xa1, 0xd9, 0x72, 0xfd, 0x94, 0xa3, 0x1c, 0x3b, 0xfb, -/// 0xf2, 0x4e, 0x39, 0x38, -/// ]; -/// let mh = Multihash::from_bytes(&digest_bytes).unwrap(); -/// assert_eq!(mh.code(), Sha3_256); -/// assert_eq!(mh.size(), 32); -/// assert_eq!(mh.digest(), &digest_bytes[2..]); -/// ``` -#[cfg_attr(feature = "serde-codec", derive(serde::Deserialize))] -#[cfg_attr(feature = "serde-codec", derive(serde::Serialize))] -#[derive(Clone, Copy, Debug, Eq, Ord, PartialOrd)] -pub struct Multihash { - /// The code of the Multihash. - code: u64, - /// The actual size of the digest in bytes (not the allocated size). - size: u8, - /// The digest. - #[cfg_attr(feature = "serde-codec", serde(with = "BigArray"))] - digest: [u8; S], -} - -impl Default for Multihash { - fn default() -> Self { - Self { - code: 0, - size: 0, - digest: [0; S], - } - } -} - -impl Multihash { - /// Wraps the digest in a multihash. - pub const fn wrap(code: u64, input_digest: &[u8]) -> Result { - if input_digest.len() > S { - return Err(Error::InvalidSize(input_digest.len() as _)); - } - let size = input_digest.len(); - let mut digest = [0; S]; - let mut i = 0; - while i < size { - digest[i] = input_digest[i]; - i += 1; - } - Ok(Self { - code, - size: size as u8, - digest, - }) - } - - /// Returns the code of the multihash. - pub const fn code(&self) -> u64 { - self.code - } - - /// Returns the size of the digest. - pub const fn size(&self) -> u8 { - self.size - } - - /// Returns the digest. - pub fn digest(&self) -> &[u8] { - &self.digest[..self.size as usize] - } - - /// Reads a multihash from a byte stream. - pub fn read(r: R) -> Result - where - Self: Sized, - { - let (code, size, digest) = read_multihash(r)?; - Ok(Self { code, size, digest }) - } - - /// Parses a multihash from a bytes. - /// - /// You need to make sure the passed in bytes have the correct length. The digest length - /// needs to match the `size` value of the multihash. - pub fn from_bytes(mut bytes: &[u8]) -> Result - where - Self: Sized, - { - let result = Self::read(&mut bytes)?; - // There were more bytes supplied than read - if !bytes.is_empty() { - return Err(Error::InvalidSize(bytes.len().try_into().expect( - "Currently the maximum size is 255, therefore always fits into usize", - ))); - } - - Ok(result) - } - - /// Writes a multihash to a byte stream, returning the written size. - pub fn write(&self, w: W) -> Result { - write_multihash(w, self.code(), self.size(), self.digest()) - } - - /// Returns the length in bytes needed to encode this multihash into bytes. - pub fn encoded_len(&self) -> usize { - let mut code_buf = varint_encode::u64_buffer(); - let code = varint_encode::u64(self.code, &mut code_buf); - - let mut size_buf = varint_encode::u8_buffer(); - let size = varint_encode::u8(self.size, &mut size_buf); - - code.len() + size.len() + usize::from(self.size) - } - - #[cfg(feature = "alloc")] - /// Returns the bytes of a multihash. 
- pub fn to_bytes(&self) -> Vec { - let mut bytes = Vec::with_capacity(self.size().into()); - let written = self - .write(&mut bytes) - .expect("writing to a vec should never fail"); - debug_assert_eq!(written, bytes.len()); - bytes - } - - /// Truncates the multihash to the given size. It's up to the caller to ensure that the new size - /// is secure (cryptographically) to use. - /// - /// If the new size is larger than the current size, this method does nothing. - /// - /// ``` - /// use multihash::{Code, MultihashDigest}; - /// - /// let hash = Code::Sha3_256.digest(b"Hello world!").truncate(20); - /// ``` - pub fn truncate(&self, size: u8) -> Self { - let mut mh = *self; - mh.size = mh.size.min(size); - mh - } - - /// Resizes the backing multihash buffer. This function fails if the hash digest is larger than - /// the target size. - /// - /// ``` - /// use multihash::{Code, MultihashDigest, MultihashGeneric}; - /// - /// let hash = Code::Sha3_256.digest(b"Hello world!"); - /// let large_hash: MultihashGeneric<32> = hash.resize().unwrap(); - /// ``` - pub fn resize(&self) -> Result, Error> { - let size = self.size as usize; - if size > R { - return Err(Error::InvalidSize(self.size as u64)); - } - let mut mh = Multihash { - code: self.code, - size: self.size, - digest: [0; R], - }; - mh.digest[..size].copy_from_slice(&self.digest[..size]); - Ok(mh) - } - - /// Decomposes struct, useful when needing a `Sized` array or moving all the data into another type - /// - /// It is recommended to use `digest()` `code()` and `size()` for most cases - /// - /// ``` - /// use multihash::{Code, MultihashDigest}; - /// struct Foo { - /// arr: [u8; S], - /// len: usize, - /// } - /// - /// let hash = Code::Sha3_256.digest(b"Hello world!"); - /// let (.., arr, size) = hash.into_inner(); - /// let foo = Foo { arr, len: size as usize }; - /// ``` - pub fn into_inner(self) -> (u64, [u8; S], u8) { - let Self { code, digest, size } = self; - (code, digest, size) - } -} - -// Don't hash the whole allocated space, but just the actual digest -#[allow(unknown_lints, renamed_and_removed_lints)] -#[allow(clippy::derived_hash_with_manual_eq, clippy::derive_hash_xor_eq)] -impl core::hash::Hash for Multihash { - fn hash(&self, state: &mut T) { - self.code.hash(state); - self.digest().hash(state); - } -} - -#[cfg(feature = "alloc")] -impl From> for Vec { - fn from(multihash: Multihash) -> Self { - multihash.to_bytes() - } -} - -impl PartialEq> for Multihash { - fn eq(&self, other: &Multihash) -> bool { - // NOTE: there's no need to explicitly check the sizes, that's implicit in the digest. - self.code == other.code && self.digest() == other.digest() - } -} - -#[cfg(feature = "scale-codec")] -impl parity_scale_codec::Encode for Multihash { - fn encode_to(&self, dest: &mut EncOut) { - self.code.encode_to(dest); - self.size.encode_to(dest); - // **NOTE** We write the digest directly to dest, since we have known the size of digest. - // - // We do not choose to encode &[u8] directly, because it will add extra bytes (the compact length of digest). - // For a valid multihash, the length of digest must equal to `size`. - // Therefore, we can only read raw bytes whose length is equal to `size` when decoding. 
- dest.write(self.digest()); - } -} - -#[cfg(feature = "scale-codec")] -impl parity_scale_codec::EncodeLike for Multihash {} - -#[cfg(feature = "scale-codec")] -impl parity_scale_codec::Decode for Multihash { - fn decode( - input: &mut DecIn, - ) -> Result { - let mut mh = Multihash { - code: parity_scale_codec::Decode::decode(input)?, - size: parity_scale_codec::Decode::decode(input)?, - digest: [0; S], - }; - if mh.size as usize > S { - return Err(parity_scale_codec::Error::from("invalid size")); - } - // For a valid multihash, the length of digest must equal to the size. - input.read(&mut mh.digest[..mh.size as usize])?; - Ok(mh) - } -} - -/// Writes the multihash to a byte stream. -pub fn write_multihash(mut w: W, code: u64, size: u8, digest: &[u8]) -> Result -where - W: io::Write, -{ - let mut code_buf = varint_encode::u64_buffer(); - let code = varint_encode::u64(code, &mut code_buf); - - let mut size_buf = varint_encode::u8_buffer(); - let size = varint_encode::u8(size, &mut size_buf); - - let written = code.len() + size.len() + digest.len(); - - w.write_all(code)?; - w.write_all(size)?; - w.write_all(digest)?; - - Ok(written) -} - -/// Reads a multihash from a byte stream that contains a full multihash (code, size and the digest) -/// -/// Returns the code, size and the digest. The size is the actual size and not the -/// maximum/allocated size of the digest. -/// -/// Currently the maximum size for a digest is 255 bytes. -pub fn read_multihash(mut r: R) -> Result<(u64, u8, [u8; S]), Error> -where - R: io::Read, -{ - let code = read_u64(&mut r)?; - let size = read_u64(&mut r)?; - - if size > S as u64 || size > u8::MAX as u64 { - return Err(Error::InvalidSize(size)); - } - - let mut digest = [0; S]; - r.read_exact(&mut digest[..size as usize])?; - Ok((code, size as u8, digest)) -} - -#[cfg(feature = "std")] -pub(crate) use unsigned_varint::io::read_u64; - -/// Reads 64 bits from a byte array into a u64 -/// Adapted from unsigned-varint's generated read_u64 function at -/// https://github.com/paritytech/unsigned-varint/blob/master/src/io.rs -#[cfg(not(feature = "std"))] -pub(crate) fn read_u64(mut r: R) -> Result { - use unsigned_varint::decode; - let mut b = varint_encode::u64_buffer(); - for i in 0..b.len() { - let n = r.read(&mut (b[i..i + 1]))?; - if n == 0 { - return Err(Error::Varint(decode::Error::Insufficient)); - } else if decode::is_last(b[i]) { - return decode::u64(&b[..=i]) - .map(|decoded| decoded.0) - .map_err(Error::Varint); - } - } - Err(Error::Varint(decode::Error::Overflow)) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::multihash_impl::Code; - - #[test] - fn roundtrip() { - let hash = Code::Sha2_256.digest(b"hello world"); - let mut buf = [0u8; 35]; - let written = hash.write(&mut buf[..]).unwrap(); - let hash2 = Multihash::<32>::read(&buf[..]).unwrap(); - assert_eq!(hash, hash2); - assert_eq!(hash.encoded_len(), written); - } - - #[test] - fn test_truncate_down() { - let hash = Code::Sha2_256.digest(b"hello world"); - let small = hash.truncate(20); - assert_eq!(small.size(), 20); - } - - #[test] - fn test_truncate_up() { - let hash = Code::Sha2_256.digest(b"hello world"); - let small = hash.truncate(100); - assert_eq!(small.size(), 32); - } - - #[test] - fn test_resize_fits() { - let hash = Code::Sha2_256.digest(b"hello world"); - let _: Multihash<32> = hash.resize().unwrap(); - } - - #[test] - fn test_resize_up() { - let hash = Code::Sha2_256.digest(b"hello world"); - let _: Multihash<100> = hash.resize().unwrap(); - } - - #[test] - fn 
test_resize_truncate() { - let hash = Code::Sha2_256.digest(b"hello world"); - hash.resize::<20>().unwrap_err(); - } - - #[test] - #[cfg(feature = "scale-codec")] - fn test_scale() { - use parity_scale_codec::{Decode, Encode}; - - let mh1 = Code::Sha2_256.digest(b"hello world"); - // println!("mh1: code = {}, size = {}, digest = {:?}", mh1.code(), mh1.size(), mh1.digest()); - let mh1_bytes = mh1.encode(); - // println!("Multihash<32>: {}", hex::encode(&mh1_bytes)); - let mh2: Multihash<32> = Decode::decode(&mut &mh1_bytes[..]).unwrap(); - assert_eq!(mh1, mh2); - - let mh3: Multihash<64> = Code::Sha2_256.digest(b"hello world"); - // println!("mh3: code = {}, size = {}, digest = {:?}", mh3.code(), mh3.size(), mh3.digest()); - let mh3_bytes = mh3.encode(); - // println!("Multihash<64>: {}", hex::encode(&mh3_bytes)); - let mh4: Multihash<64> = Decode::decode(&mut &mh3_bytes[..]).unwrap(); - assert_eq!(mh3, mh4); - - assert_eq!(mh1_bytes, mh3_bytes); - } - - #[test] - #[cfg(feature = "serde-codec")] - fn test_serde() { - let mh = Multihash::<32>::default(); - let bytes = serde_json::to_string(&mh).unwrap(); - let mh2: Multihash<32> = serde_json::from_str(&bytes).unwrap(); - assert_eq!(mh, mh2); - } - - #[test] - fn test_eq_sizes() { - let mh1 = Multihash::<32>::default(); - let mh2 = Multihash::<64>::default(); - assert_eq!(mh1, mh2); - } - - #[test] - fn decode_non_minimal_error() { - // This is a non-minimal varint. - let data = [241, 0, 0, 0, 0, 0, 128, 132, 132, 132, 58]; - let result = read_u64(&data[..]); - assert!(result.is_err()); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/multihash_impl.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/multihash_impl.rs deleted file mode 100644 index 29face0b21aa..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/multihash_impl.rs +++ /dev/null @@ -1,126 +0,0 @@ -pub use multihash_derive::Multihash; - -/// Default (cryptographically secure) Multihash implementation. -/// -/// This is a default set of hashing algorithms. Usually applications would use their own subset of -/// algorithms. See the [`Multihash` derive] for more information. 
-/// -/// [`Multihash` derive]: crate::derive -#[cfg_attr(feature = "serde-codec", derive(serde::Deserialize))] -#[cfg_attr(feature = "serde-codec", derive(serde::Serialize))] -#[derive(Copy, Clone, Debug, Eq, Multihash, PartialEq)] -#[mh(alloc_size = 64)] -pub enum Code { - /// SHA-256 (32-byte hash size) - #[cfg(feature = "sha2")] - #[mh(code = 0x12, hasher = crate::Sha2_256)] - Sha2_256, - /// SHA-512 (64-byte hash size) - #[cfg(feature = "sha2")] - #[mh(code = 0x13, hasher = crate::Sha2_512)] - Sha2_512, - /// SHA3-224 (28-byte hash size) - #[cfg(feature = "sha3")] - #[mh(code = 0x17, hasher = crate::Sha3_224)] - Sha3_224, - /// SHA3-256 (32-byte hash size) - #[cfg(feature = "sha3")] - #[mh(code = 0x16, hasher = crate::Sha3_256)] - Sha3_256, - /// SHA3-384 (48-byte hash size) - #[cfg(feature = "sha3")] - #[mh(code = 0x15, hasher = crate::Sha3_384)] - Sha3_384, - /// SHA3-512 (64-byte hash size) - #[cfg(feature = "sha3")] - #[mh(code = 0x14, hasher = crate::Sha3_512)] - Sha3_512, - /// Keccak-224 (28-byte hash size) - #[cfg(feature = "sha3")] - #[mh(code = 0x1a, hasher = crate::Keccak224)] - Keccak224, - /// Keccak-256 (32-byte hash size) - #[cfg(feature = "sha3")] - #[mh(code = 0x1b, hasher = crate::Keccak256)] - Keccak256, - /// Keccak-384 (48-byte hash size) - #[cfg(feature = "sha3")] - #[mh(code = 0x1c, hasher = crate::Keccak384)] - Keccak384, - /// Keccak-512 (64-byte hash size) - #[cfg(feature = "sha3")] - #[mh(code = 0x1d, hasher = crate::Keccak512)] - Keccak512, - /// BLAKE2b-256 (32-byte hash size) - #[cfg(feature = "blake2b")] - #[mh(code = 0xb220, hasher = crate::Blake2b256)] - Blake2b256, - /// BLAKE2b-512 (64-byte hash size) - #[cfg(feature = "blake2b")] - #[mh(code = 0xb240, hasher = crate::Blake2b512)] - Blake2b512, - /// BLAKE2s-128 (16-byte hash size) - #[cfg(feature = "blake2s")] - #[mh(code = 0xb250, hasher = crate::Blake2s128)] - Blake2s128, - /// BLAKE2s-256 (32-byte hash size) - #[cfg(feature = "blake2s")] - #[mh(code = 0xb260, hasher = crate::Blake2s256)] - Blake2s256, - /// BLAKE3-256 (32-byte hash size) - #[cfg(feature = "blake3")] - #[mh(code = 0x1e, hasher = crate::Blake3_256)] - Blake3_256, - /// RIPEMD-160 (20-byte hash size) - #[cfg(feature = "ripemd")] - #[mh(code = 0x1053, hasher = crate::Ripemd160)] - Ripemd160, - /// RIPEMD-256 (32-byte hash size) - #[cfg(feature = "ripemd")] - #[mh(code = 0x1054, hasher = crate::Ripemd256)] - Ripemd256, - /// RIPEMD-320 (40-byte hash size) - #[cfg(feature = "ripemd")] - #[mh(code = 0x1055, hasher = crate::Ripemd320)] - Ripemd320, - - // The following hashes are not cryptographically secure hashes and are not enabled by default - /// Identity hash (max. 
64 bytes) - #[cfg(feature = "identity")] - #[mh(code = 0x00, hasher = crate::IdentityHasher::<64>)] - Identity, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::hasher::Hasher; - use crate::hasher_impl::sha3::{Sha3_256, Sha3_512}; - use crate::multihash::MultihashDigest; - - #[test] - fn test_hasher_256() { - let mut hasher = Sha3_256::default(); - hasher.update(b"hello world"); - let digest = hasher.finalize(); - let hash = Code::Sha3_256.wrap(digest).unwrap(); - let hash2 = Code::Sha3_256.digest(b"hello world"); - assert_eq!(hash.code(), u64::from(Code::Sha3_256)); - assert_eq!(hash.size(), 32); - assert_eq!(hash.digest(), digest); - assert_eq!(hash, hash2); - } - - #[test] - fn test_hasher_512() { - let mut hasher = Sha3_512::default(); - hasher.update(b"hello world"); - let digest = hasher.finalize(); - let hash = Code::Sha3_512.wrap(digest).unwrap(); - let hash2 = Code::Sha3_512.digest(b"hello world"); - assert_eq!(hash.code(), u64::from(Code::Sha3_512)); - assert_eq!(hash.size(), 64); - assert_eq!(hash.digest(), digest); - assert_eq!(hash, hash2); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/tests/lib.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/tests/lib.rs deleted file mode 100644 index c0ebca55cca3..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/tests/lib.rs +++ /dev/null @@ -1,427 +0,0 @@ -use std::io::{Cursor, Write}; - -use multihash::{ - derive::Multihash, Blake2b256, Blake2b512, Blake2s128, Blake2s256, Blake3_256, Hasher, - Identity256, Keccak224, Keccak256, Keccak384, Keccak512, MultihashDigest, Sha1, Sha2_256, - Sha2_512, Sha3_224, Sha3_256, Sha3_384, Sha3_512, Strobe256, Strobe512, -}; - -#[cfg(feature = "ripemd")] -use multihash::{Ripemd160, Ripemd256, Ripemd320}; - -#[derive(Clone, Copy, Debug, Eq, Multihash, PartialEq)] -#[mh(alloc_size = 64)] -pub enum Code { - #[mh(code = 0x00, hasher = Identity256)] - Identity, - #[mh(code = 0x11, hasher = Sha1)] - Sha1, - #[mh(code = 0x12, hasher = Sha2_256)] - Sha2_256, - #[mh(code = 0x13, hasher = Sha2_512)] - Sha2_512, - #[mh(code = 0x17, hasher = Sha3_224)] - Sha3_224, - #[mh(code = 0x16, hasher = Sha3_256)] - Sha3_256, - #[mh(code = 0x15, hasher = Sha3_384)] - Sha3_384, - #[mh(code = 0x14, hasher = Sha3_512)] - Sha3_512, - #[mh(code = 0x1a, hasher = Keccak224)] - Keccak224, - #[mh(code = 0x1b, hasher = Keccak256)] - Keccak256, - #[mh(code = 0x1c, hasher = Keccak384)] - Keccak384, - #[mh(code = 0x1d, hasher = Keccak512)] - Keccak512, - #[mh(code = 0xb220, hasher = Blake2b256)] - Blake2b256, - #[mh(code = 0xb240, hasher = Blake2b512)] - Blake2b512, - #[mh(code = 0xb250, hasher = Blake2s128)] - Blake2s128, - #[mh(code = 0xb260, hasher = Blake2s256)] - Blake2s256, - #[mh(code = 0x1e, hasher = Blake3_256)] - Blake3_256, - #[mh(code = 0x3312e7, hasher = Strobe256)] - Strobe256, - #[mh(code = 0x3312e8, hasher = Strobe512)] - Strobe512, - #[cfg(feature = "ripemd")] - #[mh(code = 0x1053, hasher = Ripemd160)] - Ripemd160, - #[cfg(feature = "ripemd")] - #[mh(code = 0x1054, hasher = Ripemd256)] - Ripemd256, - #[cfg(feature = "ripemd")] - #[mh(code = 0x1055, hasher = Ripemd320)] - Ripemd320, -} - -macro_rules! 
assert_encode { - // Mutlihash enum member, Multihash code, input, Multihash as hex - {$( $alg:ty, $code:expr, $data:expr, $expect:expr; )*} => { - $( - let expected = hex::decode($expect).unwrap(); - - // From code - assert_eq!( - $code.digest($data).to_bytes(), - expected, - "{:?} encodes correctly (from code)", stringify!($alg) - ); - - // From incremental hashing - let mut hasher = <$alg>::default(); - hasher.update($data); - assert_eq!( - $code.wrap(hasher.finalize()).unwrap().to_bytes(), - expected, - "{:?} encodes correctly (from hasher)", stringify!($alg) - ); - )* - } -} - -#[allow(clippy::cognitive_complexity)] -#[test] -fn multihash_encode() { - assert_encode! { - Identity256, Code::Identity, b"beep boop", "00096265657020626f6f70"; - Sha1, Code::Sha1, b"beep boop", "11147c8357577f51d4f0a8d393aa1aaafb28863d9421"; - Sha2_256, Code::Sha2_256, b"helloworld", "1220936a185caaa266bb9cbe981e9e05cb78cd732b0b3280eb944412bb6f8f8f07af"; - Sha2_256, Code::Sha2_256, b"beep boop", "122090ea688e275d580567325032492b597bc77221c62493e76330b85ddda191ef7c"; - Sha2_512, Code::Sha2_512, b"hello world", "1340309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"; - Sha3_224, Code::Sha3_224, b"hello world", "171Cdfb7f18c77e928bb56faeb2da27291bd790bc1045cde45f3210bb6c5"; - Sha3_256, Code::Sha3_256, b"hello world", "1620644bcc7e564373040999aac89e7622f3ca71fba1d972fd94a31c3bfbf24e3938"; - Sha3_384, Code::Sha3_384, b"hello world", "153083bff28dde1b1bf5810071c6643c08e5b05bdb836effd70b403ea8ea0a634dc4997eb1053aa3593f590f9c63630dd90b"; - Sha3_512, Code::Sha3_512, b"hello world", "1440840006653e9ac9e95117a15c915caab81662918e925de9e004f774ff82d7079a40d4d27b1b372657c61d46d470304c88c788b3a4527ad074d1dccbee5dbaa99a"; - Keccak224, Code::Keccak224, b"hello world", "1A1C25f3ecfebabe99686282f57f5c9e1f18244cfee2813d33f955aae568"; - Keccak256, Code::Keccak256, b"hello world", "1B2047173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"; - Keccak384, Code::Keccak384, b"hello world", "1C3065fc99339a2a40e99d3c40d695b22f278853ca0f925cde4254bcae5e22ece47e6441f91b6568425adc9d95b0072eb49f"; - Keccak512, Code::Keccak512, b"hello world", "1D403ee2b40047b8060f68c67242175660f4174d0af5c01d47168ec20ed619b0b7c42181f40aa1046f39e2ef9efc6910782a998e0013d172458957957fac9405b67d"; - Blake2b512, Code::Blake2b512, b"hello world", "c0e40240021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0"; - Blake2s256, Code::Blake2s256, b"hello world", "e0e402209aec6806794561107e594b1f6a8a6b0c92a0cba9acf5e5e93cca06f781813b0b"; - Blake2b256, Code::Blake2b256, b"hello world", "a0e40220256c83b297114d201b30179f3f0ef0cace9783622da5974326b436178aeef610"; - Blake2s128, Code::Blake2s128, b"hello world", "d0e4021037deae0226c30da2ab424a7b8ee14e83"; - Blake3_256, Code::Blake3_256, b"hello world", "1e20d74981efa70a0c880b8d8c1985d075dbcbf679b99a5f9914e5aaf96b831a9e24"; - } - - #[cfg(feature = "ripemd")] - assert_encode! { - Ripemd160, Code::Ripemd160, b"hello world", "d3201498c615784ccb5fe5936fbc0cbe9dfdb408d92f0f"; - Ripemd256, Code::Ripemd256, b"hello world", "d420200d375cf9d9ee95a3bb15f757c81e93bb0ad963edf69dc4d12264031814608e37"; - Ripemd320, Code::Ripemd320, b"hello world", "d520280e12fe7d075f8e319e07c106917eddb0135e9a10aefb50a8a07ccb0582ff1fa27b95ed5af57fd5c6"; - } -} - -macro_rules! 
assert_decode { - {$( $code:expr, $hash:expr; )*} => { - $( - let hash = hex::decode($hash).unwrap(); - assert_eq!( - Multihash::from_bytes(&hash).unwrap().code(), - u64::from($code), - "{:?} decodes correctly", stringify!($code) - ); - )* - } -} - -#[test] -fn assert_decode() { - assert_decode! { - Code::Identity, "000a68656c6c6f776f726c64"; - Code::Sha1, "11147c8357577f51d4f0a8d393aa1aaafb28863d9421"; - Code::Sha2_256, "1220936a185caaa266bb9cbe981e9e05cb78cd732b0b3280eb944412bb6f8f8f07af"; - Code::Sha2_256, "122090ea688e275d580567325032492b597bc77221c62493e76330b85ddda191ef7c"; - Code::Sha2_512, "1340309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"; - Code::Sha3_224, "171Cdfb7f18c77e928bb56faeb2da27291bd790bc1045cde45f3210bb6c5"; - Code::Sha3_256, "1620644bcc7e564373040999aac89e7622f3ca71fba1d972fd94a31c3bfbf24e3938"; - Code::Sha3_384, "153083bff28dde1b1bf5810071c6643c08e5b05bdb836effd70b403ea8ea0a634dc4997eb1053aa3593f590f9c63630dd90b"; - Code::Sha3_512, "1440840006653e9ac9e95117a15c915caab81662918e925de9e004f774ff82d7079a40d4d27b1b372657c61d46d470304c88c788b3a4527ad074d1dccbee5dbaa99a"; - Code::Keccak224, "1A1C25f3ecfebabe99686282f57f5c9e1f18244cfee2813d33f955aae568"; - Code::Keccak256, "1B2047173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"; - Code::Keccak384, "1C3065fc99339a2a40e99d3c40d695b22f278853ca0f925cde4254bcae5e22ece47e6441f91b6568425adc9d95b0072eb49f"; - Code::Keccak512, "1D403ee2b40047b8060f68c67242175660f4174d0af5c01d47168ec20ed619b0b7c42181f40aa1046f39e2ef9efc6910782a998e0013d172458957957fac9405b67d"; - Code::Blake2b512, "c0e40240021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0"; - Code::Blake2s256, "e0e402209aec6806794561107e594b1f6a8a6b0c92a0cba9acf5e5e93cca06f781813b0b"; - Code::Blake2b256, "a0e40220256c83b297114d201b30179f3f0ef0cace9783622da5974326b436178aeef610"; - Code::Blake2s128, "d0e4021037deae0226c30da2ab424a7b8ee14e83"; - Code::Blake3_256, "1e20d74981efa70a0c880b8d8c1985d075dbcbf679b99a5f9914e5aaf96b831a9e24"; - } - #[cfg(feature = "ripemd")] - assert_decode! { - Code::Ripemd160, "d3201498c615784ccb5fe5936fbc0cbe9dfdb408d92f0f"; - Code::Ripemd256, "d420200d375cf9d9ee95a3bb15f757c81e93bb0ad963edf69dc4d12264031814608e37"; - Code::Ripemd320, "d520280e12fe7d075f8e319e07c106917eddb0135e9a10aefb50a8a07ccb0582ff1fa27b95ed5af57fd5c6"; - } -} - -macro_rules! 
assert_roundtrip { - ($( $code:expr, $alg:ident; )*) => { - $( - // Hashing with one call - { - let hash = $code.digest(b"helloworld"); - assert_eq!( - Multihash::from_bytes(&hash.to_bytes()).unwrap().code(), - hash.code() - ); - } - // Hashing incrementally - { - let mut hasher = <$alg>::default(); - hasher.update(b"helloworld"); - let hash = $code.wrap(hasher.finalize()).unwrap(); - assert_eq!( - Multihash::from_bytes(&hash.to_bytes()).unwrap().code(), - hash.code() - ); - } - // Hashing as `Write` implementation - { - let mut hasher = <$alg>::default(); - hasher.write_all(b"helloworld").unwrap(); - let hash = $code.wrap(hasher.finalize()).unwrap(); - assert_eq!( - Multihash::from_bytes(&hash.to_bytes()).unwrap().code(), - hash.code() - ); - } - )* - } -} - -#[allow(clippy::cognitive_complexity)] -#[test] -fn assert_roundtrip() { - assert_roundtrip!( - Code::Identity, Identity256; - Code::Sha1, Sha1; - Code::Sha2_256, Sha2_256; - Code::Sha2_512, Sha2_512; - Code::Sha3_224, Sha3_224; - Code::Sha3_256, Sha3_256; - Code::Sha3_384, Sha3_384; - Code::Sha3_512, Sha3_512; - Code::Keccak224, Keccak224; - Code::Keccak256, Keccak256; - Code::Keccak384, Keccak384; - Code::Keccak512, Keccak512; - Code::Blake2b512, Blake2b512; - Code::Blake2s256, Blake2s256; - Code::Blake3_256, Blake3_256; - ); - - #[cfg(feature = "ripemd")] - assert_roundtrip! { - Code::Ripemd160, Ripemd160; - Code::Ripemd256, Ripemd256; - Code::Ripemd320, Ripemd320; - } -} - -/// Testing the public interface of `Multihash` and coversions to it -fn multihash_methods(code: Code, prefix: &str, digest_str: &str) -where - H: Hasher + Default, -{ - let digest = hex::decode(digest_str).unwrap(); - let expected_bytes = hex::decode(format!("{}{}", prefix, digest_str)).unwrap(); - let mut expected_cursor = Cursor::new(&expected_bytes); - let multihash = code.digest(b"hello world"); - - assert_eq!(Multihash::wrap(code.into(), &digest).unwrap(), multihash); - assert_eq!(multihash.code(), u64::from(code)); - assert_eq!(multihash.size() as usize, digest.len()); - assert_eq!(multihash.digest(), digest); - assert_eq!(Multihash::read(&mut expected_cursor).unwrap(), multihash); - assert_eq!(Multihash::from_bytes(&expected_bytes).unwrap(), multihash); - let mut written_buf = Vec::new(); - multihash.write(&mut written_buf).unwrap(); - assert_eq!(written_buf, expected_bytes); - assert_eq!(multihash.to_bytes(), expected_bytes); - - // Test from hasher digest conversion - let mut hasher = H::default(); - hasher.update(b"hello world"); - let multihash_from_digest = code.wrap(hasher.finalize()).unwrap(); - assert_eq!(multihash_from_digest.code(), u64::from(code)); - assert_eq!(multihash_from_digest.size() as usize, digest.len()); - assert_eq!(multihash_from_digest.digest(), digest); -} - -#[test] -fn test_multihash_methods() { - multihash_methods::(Code::Identity, "000b", "68656c6c6f20776f726c64"); - multihash_methods::( - Code::Sha1, - "1114", - "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed", - ); - multihash_methods::( - Code::Sha2_256, - "1220", - "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9", - ); - multihash_methods::( - Code::Sha2_512, - "1340", - "309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"); - multihash_methods::( - Code::Sha3_224, - "171C", - "dfb7f18c77e928bb56faeb2da27291bd790bc1045cde45f3210bb6c5", - ); - multihash_methods::( - Code::Sha3_256, - "1620", - "644bcc7e564373040999aac89e7622f3ca71fba1d972fd94a31c3bfbf24e3938", - ); - 
multihash_methods::( - Code::Sha3_384, - "1530", - "83bff28dde1b1bf5810071c6643c08e5b05bdb836effd70b403ea8ea0a634dc4997eb1053aa3593f590f9c63630dd90b"); - multihash_methods::( - Code::Sha3_512, - "1440", - "840006653e9ac9e95117a15c915caab81662918e925de9e004f774ff82d7079a40d4d27b1b372657c61d46d470304c88c788b3a4527ad074d1dccbee5dbaa99a"); - multihash_methods::( - Code::Keccak224, - "1A1C", - "25f3ecfebabe99686282f57f5c9e1f18244cfee2813d33f955aae568", - ); - multihash_methods::( - Code::Keccak256, - "1B20", - "47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad", - ); - multihash_methods::( - Code::Keccak384, - "1C30", - "65fc99339a2a40e99d3c40d695b22f278853ca0f925cde4254bcae5e22ece47e6441f91b6568425adc9d95b0072eb49f"); - multihash_methods::( - Code::Keccak512, - "1D40", - "3ee2b40047b8060f68c67242175660f4174d0af5c01d47168ec20ed619b0b7c42181f40aa1046f39e2ef9efc6910782a998e0013d172458957957fac9405b67d"); - multihash_methods::( - Code::Blake2b512, - "c0e40240", - "021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0"); - multihash_methods::( - Code::Blake2s256, - "e0e40220", - "9aec6806794561107e594b1f6a8a6b0c92a0cba9acf5e5e93cca06f781813b0b", - ); - multihash_methods::( - Code::Blake2b256, - "a0e40220", - "256c83b297114d201b30179f3f0ef0cace9783622da5974326b436178aeef610", - ); - multihash_methods::( - Code::Blake2s128, - "d0e40210", - "37deae0226c30da2ab424a7b8ee14e83", - ); - multihash_methods::( - Code::Blake3_256, - "1e20", - "d74981efa70a0c880b8d8c1985d075dbcbf679b99a5f9914e5aaf96b831a9e24", - ); - #[cfg(feature = "ripemd")] - { - multihash_methods::( - Code::Ripemd160, - "d32014", - "98c615784ccb5fe5936fbc0cbe9dfdb408d92f0f", - ); - multihash_methods::( - Code::Ripemd256, - "d42020", - "0d375cf9d9ee95a3bb15f757c81e93bb0ad963edf69dc4d12264031814608e37", - ); - multihash_methods::( - Code::Ripemd320, - "d52028", - "0e12fe7d075f8e319e07c106917eddb0135e9a10aefb50a8a07ccb0582ff1fa27b95ed5af57fd5c6", - ); - } -} - -#[test] -#[should_panic] -fn test_long_identity_hash() { - // The identity hash allocates if the input size is bigger than the maximum size - let input = b"abcdefghijklmnopqrstuvwxyz abcdefghijklmnopqrstuvwxyz abcdefghijklmnopqrstuvwxyz"; - Code::Identity.digest(input); -} - -#[test] -fn multihash_errors() { - assert!( - Multihash::from_bytes(&[]).is_err(), - "Should error on empty data" - ); - assert!( - Multihash::from_bytes(&[1, 2, 3]).is_err(), - "Should error on invalid multihash" - ); - assert!( - Multihash::from_bytes(&[1, 2, 3]).is_err(), - "Should error on invalid prefix" - ); - assert!( - Multihash::from_bytes(&[0x12, 0x20, 0xff]).is_err(), - "Should error on correct prefix with wrong digest" - ); - let identity_code: u8 = 0x00; - let identity_length = 3; - assert!( - Multihash::from_bytes(&[identity_code, identity_length, 1, 2, 3, 4]).is_err(), - "Should error on wrong hash length" - ); -} - -#[test] -fn blak3_non_default_digest() { - use multihash::Blake3Hasher; - const DIGEST_SIZE: usize = 16; - pub struct ContentHasher(Blake3Hasher); - - pub struct ContentHash([u8; DIGEST_SIZE]); - - impl ContentHasher { - fn new() -> ContentHasher { - ContentHasher(Blake3Hasher::default()) - } - - fn write(&mut self, input: &[u8]) { - self.0.update(input); - } - - fn finish(&mut self) -> ContentHash { - let hash = multihash::Code::Blake3_256.wrap(self.0.finalize()).unwrap(); - let resized_hash = hash.resize::().unwrap(); - - let mut content = ContentHash([0u8; DIGEST_SIZE]); - 
content.0.copy_from_slice(resized_hash.digest()); - content - } - - fn reset(&mut self) { - self.0.reset(); - } - } - - let mut hasher = ContentHasher::new(); - hasher.write("foobar".as_bytes()); - let content_hash = hasher.finish(); - hasher.reset(); - - let expected = hex::decode("aa51dcd43d5c6c5203ee16906fd6b35d").unwrap(); - assert_eq!(&content_hash.0, expected.as_slice()) -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.cargo_vcs_info.json new file mode 100644 index 000000000000..e633a8dccd62 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "4c0ef5268355308d7f083482dad1c81318db4f6b" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.github/codecov.yml b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.github/codecov.yml similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.github/codecov.yml rename to third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.github/codecov.yml diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.github/dependabot.yml b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.github/dependabot.yml similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.github/dependabot.yml rename to third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.github/dependabot.yml diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.github/workflows/build.yml b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.github/workflows/build.yml new file mode 100644 index 000000000000..5b39d193bd05 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.github/workflows/build.yml @@ -0,0 +1,162 @@ +name: build + +on: [push, pull_request] + +jobs: + build: + name: Build + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, macos-latest, windows-latest] + toolchain: [stable] + runs-on: ${{ matrix.platform }} + + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Cache Dependencies & Build Outputs + uses: actions/cache@v4 + with: + path: ~/.cargo + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.toolchain }} + components: rustfmt, clippy + + - name: Check Code Format + run: cargo fmt --all -- --check + shell: bash + + - name: Code Lint + run: cargo clippy --all-targets --all-features --workspace -- -D warnings + shell: bash + + - name: Code Lint Without Default Features + run: cargo clippy --no-default-features --workspace -- -D warnings + shell: bash + + - name: Test + run: cargo test --all-features --workspace + shell: bash + + build-no-std: + name: Build no_std + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust 
Toolchain + uses: dtolnay/rust-toolchain@stable + with: + targets: thumbv6m-none-eabi + + - name: Build + run: cargo build --no-default-features --workspace --target thumbv6m-none-eabi + shell: bash + + build-no-std-with-serde: + name: Build no_std with `serde` feature enabled + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + with: + targets: thumbv6m-none-eabi + + - name: Build + run: cargo build --no-default-features --features serde --workspace --target thumbv6m-none-eabi + shell: bash + + msrv: + name: MSRV + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Extract MSRV from Cargo.toml + run: | + MSRV=$(cargo metadata --format-version 1 --no-deps | jq -r '.packages[] | select(.name == "multihash") | .rust_version') + echo "MSRV=$MSRV" >> $GITHUB_ENV + + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.MSRV }} + + - uses: Swatinem/rust-cache@v2 + + - run: cargo +"$MSRV" build --package multihash + + coverage: + name: Code Coverage + runs-on: ubuntu-latest + container: + image: xd009642/tarpaulin:0.31.2 + options: --security-opt seccomp=unconfined + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Generate code coverage + run: cargo tarpaulin --verbose --all-features --workspace --timeout 120 --out Xml + + - name: Upload Code Coverage + uses: codecov/codecov-action@v4 + + cargo-deny: + name: Cargo Deny + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Install cargo-deny + uses: taiki-e/install-action@v2 + with: + tool: cargo-deny + + - name: Cargo Deny - Check + run: cargo deny check + shell: bash + + # todo: run on all crates + cargo-hack-codetable: + name: Cargo Hack - codetable + runs-on: ubuntu-latest + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Install cargo-hack + uses: taiki-e/install-action@v2 + with: + tool: cargo-hack + + - name: Cargo Hack - Check each feature + run: cargo hack check -p multihash-codetable --each-feature + shell: bash + env: + RUSTFLAGS: -D warnings + + semver-checks: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Check semver + uses: obi1kenobi/cargo-semver-checks-action@v2 diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.gitignore b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.gitignore similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/.gitignore rename to third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/.gitignore diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/CHANGELOG.md new file mode 100644 index 000000000000..c7ca1c76175f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/CHANGELOG.md @@ -0,0 +1,146 @@ + +## [](https://github.com/multiformats/rust-multihash/compare/v0.19.1...v0.19.2) (2023-10-23) + +### Dependency Updates + +- `multihash` 0.19.2 + - update `unsigned-varint` +- `codetable` 0.1.4 + - update `strobe` +- `multihash-derive` 0.9.1 + - update `multihash-derive-impl` +- `multihash-derive-impl` 0.9.1 + - remove `proc-macro-error` dependency + - update 
`synstructure` + - update `syn` to v2 + +## [](https://github.com/multiformats/rust-multihash/compare/v0.19.0...v0.19.1) (2023-09-06) + +### Bug Fixes + +* make Serde (de)serialization no_std compatible ([#337](https://github.com/multiformats/rust-multihash/issues/337)) ([7ad5161](https://github.com/multiformats/rust-multihash/commit/7ad51614ad347bfa8c6f421986abc517e04091f6)), closes 336[#](https://github.com/multiformats/rust-multihash/issues/336). This was a regression introduced in v0.19.0. + + +## [](https://github.com/multiformats/rust-multihash/compare/v0.18.0...v0.19.0) (2023-06-06) + + +### ⚠ BREAKING CHANGES + +* the Serde serialization format changed +* split crates into multiple to isolate breaking changes +* `identity` hasher was removed + +See the migration section below for help on upgrading. + +### Features + +* **codetable:** remove `identity` hasher ([#289](https://github.com/multiformats/rust-multihash/issues/289)) ([8473e2f](https://github.com/multiformats/rust-multihash/commit/8473e2f7ecdc0838a3f35d0ecb1935b4c70797c2)) +* Serde serialize Multihash in bytes representation ([#302](https://github.com/multiformats/rust-multihash/issues/302)) ([1023226](https://github.com/multiformats/rust-multihash/commit/10232266c01aa83190af62ad6aeebf63bb7a16c7)) + + +### Bug Fixes + +* avoid possible panic in error handling code ([#277](https://github.com/multiformats/rust-multihash/issues/277)) ([5dc1dfa](https://github.com/multiformats/rust-multihash/commit/5dc1dfac0235e63e9ad80572e6b73f8fcd301ec3)) +* don't panic on non minimal varints ([#291](https://github.com/multiformats/rust-multihash/issues/291)) ([6ef6040](https://github.com/multiformats/rust-multihash/commit/6ef604012b84d5c15d4f3c66a28ead96afedf158)), closes [#282](https://github.com/multiformats/rust-multihash/issues/282) +* expose `MultihashDigest` trait in codetable ([#304](https://github.com/multiformats/rust-multihash/issues/304)) ([50b43cd](https://github.com/multiformats/rust-multihash/commit/50b43cdbba5492923ffb31bb197930d2f3e2cf14)) + + +### Code Refactoring + +* split crates into multiple to isolate breaking changes ([#272](https://github.com/multiformats/rust-multihash/issues/272)) ([954e523](https://github.com/multiformats/rust-multihash/commit/954e5233d273a2b7d682fd087178203628d131a4)) + +### Migrating + +When upgrading to `v0.19`, consider the following: + +- `Code` has moved from `multihash::Code` to `multihash_codetable::Code`. It's strongly recommended to define your own code table using `multihash_derive`. Check the [custom codetable example](codetable/examples/custom_table.rs) on how to use it. For the simplest migration, use the `multihash_codetable::Code`. + + **Before** + + ```rust + use multihash::{Code, MultihashDigest}; + + fn main() { + let hash = Code::Sha2_256.digest(b"hello, world!"); + println!("{:?}", hash); + } + ``` + + **After** + + ```rust + use multihash_codetable::{Code, MultihashDigest}; + + fn main() { + let hash = Code::Sha2_256.digest(b"hello, world!"); + println!("{:?}", hash); + } + ``` + + If you get compile errors, make sure you have the correct features enabled. In this case it would be the `sha2` and `digest` features. + +- `multihash::Multihash` now requires the size of its internal buffer as a const-generic. + You can migrate your existing code by defining the following type-alias: + + ```rust + type Multihash = multihash::Multihash<64>; + ``` + +- The `identity` hasher has been removed completely. 
+ + **Before** + + ```rust + use multihash::{Code, MultihashDigest}; + + fn main() { + let hash = Code::Identity.digest(b"hello, world!"); + println!("{:?}", hash); + } + ``` + + **After** + + ```rust + use multihash::Multihash; + + const IDENTITY_HASH_CODE: u64 = 0x00; + + fn main() { + let hash = Multihash::<64>::wrap(IDENTITY_HASH_CODE, b"hello, world!").unwrap(); + println!("{:?}", hash); + } + ``` + + Check the [identity example](examples/identity.rs) for more information on how to replicate the functionality. + + +## [v0.18.1](https://github.com/multiformats/rust-multihash/compare/v0.18.0...v0.18.1) (2023-04-14) + + +### Bug Fixes + +* don't panic on non minimal varints ([#293](https://github.com/multiformats/rust-multihash/issues/293)) ([c3445fc](https://github.com/multiformats/rust-multihash/commit/c3445fc5041b0fc573945321ebd4b0cdffe0daa5)), closes [#282](https://github.com/multiformats/rust-multihash/issues/282) + + +## [0.18.0](https://github.com/multiformats/rust-multihash/compare/v0.17.0...v0.18.0) (2022-12-06) + + +### ⚠ BREAKING CHANGES + +* update to Rust edition 2021 +* `Multihash::write()` returns bytes written + + Prior to this change it returned an empty tuple `()`, now it returns +the bytes written. + +### Features + +* add `encoded_len` and bytes written ([#252](https://github.com/multiformats/rust-multihash/issues/252)) ([b3cc43e](https://github.com/multiformats/rust-multihash/commit/b3cc43ecb6f9c59da774b094853d6542430d55ad)) + + +### Bug Fixes + +* remove Nix support ([#254](https://github.com/multiformats/rust-multihash/issues/254)) ([ebf57dd](https://github.com/multiformats/rust-multihash/commit/ebf57ddb82be2d2fd0a2f00666b0f888d4c78e1b)), closes [#247](https://github.com/multiformats/rust-multihash/issues/247) +* update to Rust edition 2021 ([#255](https://github.com/multiformats/rust-multihash/issues/255)) ([da53376](https://github.com/multiformats/rust-multihash/commit/da53376e0d9cf2d82d6c0d10590a77991cb3a6b6)) + diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/Cargo.toml new file mode 100644 index 000000000000..012c03ee66d4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/Cargo.toml @@ -0,0 +1,109 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.64" +name = "multihash" +version = "0.19.2" +authors = [ + "dignifiedquire ", + "David Craven ", + "Volker Mische ", +] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Implementation of the multihash format" +documentation = "https://docs.rs/multihash/" +readme = "README.md" +keywords = [ + "multihash", + "ipfs", +] +license = "MIT" +repository = "https://github.com/multiformats/rust-multihash" + +[lib] +name = "multihash" +path = "src/lib.rs" + +[[example]] +name = "identity" +path = "examples/identity.rs" + +[dependencies.arbitrary] +version = "1.1.0" +optional = true + +[dependencies.core2] +version = "0.4.0" +default-features = false + +[dependencies.parity-scale-codec] +version = "3.0.0" +features = ["derive"] +optional = true +default-features = false + +[dependencies.quickcheck] +version = "1.0.3" +optional = true + +[dependencies.rand] +version = "0.8.5" +features = ["small_rng"] +optional = true + +[dependencies.serde] +version = "1.0.116" +optional = true +default-features = false + +[dependencies.unsigned-varint] +version = "0.8.0" +default-features = false + +[dev-dependencies.arbitrary] +version = "1.1.0" + +[dev-dependencies.hex] +version = "0.4.2" + +[dev-dependencies.quickcheck] +version = "1.0.3" + +[dev-dependencies.rand] +version = "0.8.5" + +[dev-dependencies.serde_json] +version = "1.0.58" + +[dev-dependencies.serde_test] +version = "1.0.160" + +[features] +alloc = [] +arb = [ + "dep:quickcheck", + "dep:rand", + "dep:arbitrary", +] +default = ["std"] +scale-codec = ["dep:parity-scale-codec"] +serde = ["dep:serde"] +serde-codec = ["serde"] +std = [ + "unsigned-varint/std", + "alloc", +] diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/Cargo.toml.orig new file mode 100644 index 000000000000..1246a6617fde --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/Cargo.toml.orig @@ -0,0 +1,43 @@ +[workspace] +members = ["derive", "derive-impl", ".", "codetable"] +resolver = "2" + +[package] +name = "multihash" +description = "Implementation of the multihash format" +repository = "https://github.com/multiformats/rust-multihash" +keywords = ["multihash", "ipfs"] +version = "0.19.2" +authors = ["dignifiedquire ", "David Craven ", "Volker Mische "] +license = "MIT" +readme = "README.md" +documentation = "https://docs.rs/multihash/" +edition = "2021" +rust-version = "1.64" + +[features] +default = ["std"] +std = ["unsigned-varint/std", "alloc"] +alloc = [] +arb = ["dep:quickcheck", "dep:rand", "dep:arbitrary"] +scale-codec = ["dep:parity-scale-codec"] +serde-codec = ["serde"] # Deprecated, don't use. 
+serde = ["dep:serde"] + +[dependencies] +parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive"], optional = true } +quickcheck = { version = "1.0.3", optional = true } +rand = { version = "0.8.5", optional = true, features = ["small_rng"] } +serde = { version = "1.0.116", optional = true, default-features = false } +unsigned-varint = { version = "0.8.0", default-features = false } +arbitrary = { version = "1.1.0", optional = true } + +core2 = { version = "0.4.0", default-features = false } + +[dev-dependencies] +hex = "0.4.2" +serde_json = "1.0.58" +quickcheck = "1.0.3" +rand = "0.8.5" +arbitrary = "1.1.0" +serde_test = "1.0.160" diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/LICENSE b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/LICENSE new file mode 100644 index 000000000000..233fd7bd7d5a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (C) 2015-2016 Friedel Ziegelmayer + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+Status API Training Shop Blog About Pricing diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/README.md b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/README.md new file mode 100644 index 000000000000..5afd9eba8083 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/README.md @@ -0,0 +1,107 @@ +# rust-multihash + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) +[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) + +[![Build Status](https://github.com/multiformats/rust-multihash/workflows/build/badge.svg)](https://github.com/multiformats/rust-multihash/actions) +[![Crates.io](https://img.shields.io/crates/v/multihash?style=flat-square)](https://crates.io/crates/multihash) +[![License](https://img.shields.io/crates/l/multihash?style=flat-square)](LICENSE) +[![Documentation](https://docs.rs/multihash/badge.svg?style=flat-square)](https://docs.rs/multihash) +[![Dependency Status](https://deps.rs/repo/github/multiformats/rust-multihash/status.svg)](https://deps.rs/repo/github/multiformats/rust-multihash) +[![Coverage Status]( https://img.shields.io/codecov/c/github/multiformats/rust-multihash?style=flat-square)](https://codecov.io/gh/multiformats/rust-multihash) + +> [multihash](https://github.com/multiformats/multihash) implementation in Rust. + +## Table of Contents + - [Install](#install) + - [Usage](#usage) + - [Supported Hash Types](#supported-hash-types) + - [Maintainers](#maintainers) + - [Contribute](#contribute) + - [License](#license) + +## Install + +First add this to your `Cargo.toml` + +```toml +[dependencies] +multihash = "*" +``` + +Then run `cargo build`. + +## MSRV + +The minimum supported Rust version for this library is `1.64.0`. +This is only guaranteed without additional features activated. + +## Usage + +The `multihash` crate exposes a basic data structure for encoding and decoding multihash. +It does not provide any hashing functionality itself. +`Multihash` uses const-generics to define the internal buffer size. +You should set this to the maximum size of the digest you want to support. + +```rust +use multihash::Multihash; + +const SHA2_256: u64 = 0x12; + +fn main() { + let hash = Multihash::<64>::wrap(SHA2_256, b"my digest"); + println!("{:?}", hash); +} +``` + +### Using a custom code table + +You can derive your own application specific code table using the `multihash-derive` crate. +The `multihash-codetable` provides predefined hasher implementations if you don't want to implement your own. 
+ +```rust +use multihash_derive::MultihashDigest; + +#[derive(Clone, Copy, Debug, Eq, MultihashDigest, PartialEq)] +#[mh(alloc_size = 64)] +pub enum Code { + #[mh(code = 0x01, hasher = multihash_codetable::Sha2_256)] + Foo, + #[mh(code = 0x02, hasher = multihash_codetable::Sha2_512)] + Bar, +} + +fn main() { + let hash = Code::Foo.digest(b"my hash"); + println!("{:02x?}", hash); +} +``` + +## Supported Hash Types + +* `SHA1` +* `SHA2-256` +* `SHA2-512` +* `SHA3`/`Keccak` +* `Blake2b-256`/`Blake2b-512`/`Blake2s-128`/`Blake2s-256` +* `Blake3` +* `Strobe` + +## Maintainers + +Captain: [@dignifiedquire](https://github.com/dignifiedquire). + +## Contribute + +Contributions welcome. Please check out [the issues](https://github.com/multiformats/rust-multihash/issues). + +Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + + +## License + +[MIT](LICENSE) diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/RELEASE.md b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/RELEASE.md new file mode 100644 index 000000000000..43de960affbb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/RELEASE.md @@ -0,0 +1,53 @@ +Release process +=============== + +Generating Changelog +-------------------- + +Install dependencies + +```sh +$ npm install -g conventional-changelog-cli +$ cd rust-multihash +$ conventional-changelog --preset conventionalcommits +``` + +Add the output of that to `CHANGELOG.md`. Write a human-centric summary of changes and add migration instructions for breaking changes if needed. + +Update the linked output to reference the new version, which conventional-changelog doesn't know about: + +```md +# [](https://github.com/multiformats/rust-multihash/compare/v0.17.0...v) (2022-12-06) +``` +becomes: +```md +# [v0.18.0](https://github.com/multiformats/rust-multihash/compare/v0.17.0...v0.18.0) (2022-12-06) +``` + +Create a pull request with the changelog changes and the correct version bumps to the crates. + + +Publishing +---------- + +Once the PR above is merged, the crate can be published. This is done using [`cargo-release`](https://github.com/crate-ci/cargo-release). 
+ +This requires the following permissions + +- on github.com/multiformats/rust-multihash + - creating tags + - pushing to `master` +- on crates.io + - publish access to all published crates + +Dry run + +```sh +$ cargo release --workspace +``` + +Actual publishing + +```sh +$ cargo release --workspace --execute +``` diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/deny.toml b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/deny.toml new file mode 100644 index 000000000000..5ee1e0875b55 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/deny.toml @@ -0,0 +1,32 @@ +[licenses] +allow = [ + "Apache-2.0", + "BSD-3-Clause", + "MIT", + "Unicode-DFS-2016", +] +confidence-threshold = 1.0 + +[bans] +allow = [] +deny = [] +# TODO before changing to 'deny': +# * Resolve duplicate versions of itoa (used by criterion) +# * Remove atty from criterion +# * Resolve duplicate versions of constant_time_eq (used by blake2_sid and blake3) +multiple-versions = "deny" +skip = [] +skip-tree = [] +wildcards = "deny" +allow-wildcard-paths = true + +[sources] +allow-git = [] +allow-registry = ["https://github.com/rust-lang/crates.io-index"] +unknown-git = "deny" +unknown-registry = "deny" + +[sources.allow-org] +github = [] +gitlab = [] +bitbucket = [] diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/examples/identity.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/examples/identity.rs new file mode 100644 index 000000000000..70983e62aab8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/examples/identity.rs @@ -0,0 +1,25 @@ +//! An example for how to use the "identity" hash of [`Multihash`]. +//! +//! Identity hashing means we don't actually perform any hashing. +//! Instead, we just store data directly in place of the "digest". +//! +//! [`Multihash::wrap`] returns an error in case the provided digest is too big for the available space. +//! Make sure you construct a [`Multihash`] with a large enough buffer for your data. +//! +//! Typically, the way you want to use the "identity" hash is: +//! 1. Check if your data is smaller than whatever buffer size you chose. +//! 2. If yes, store the data inline. +//! 3. If no, hash it make it fit into the provided buffer. + +use multihash::Multihash; + +/// See for reference. 
+const IDENTITY_HASH_CODE: u64 = 0; + +fn main() { + let identity_hash = Multihash::<64>::wrap(IDENTITY_HASH_CODE, b"foobar").unwrap(); + let wrap_err = Multihash::<2>::wrap(IDENTITY_HASH_CODE, b"foobar").unwrap_err(); + + assert_eq!(identity_hash.digest(), b"foobar"); + assert_eq!(wrap_err.to_string(), "Invalid multihash size 6."); +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/release.toml b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/release.toml new file mode 100644 index 000000000000..7fe66589fa7d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/release.toml @@ -0,0 +1 @@ +consolidate-commits = false diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/rustfmt.toml b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/rustfmt.toml similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/rustfmt.toml rename to third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/rustfmt.toml diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/arb.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/arb.rs new file mode 100644 index 000000000000..c279484a1f29 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/arb.rs @@ -0,0 +1,82 @@ +use quickcheck::Gen; +use rand::{ + distributions::{weighted::WeightedIndex, Distribution}, + Rng, RngCore, SeedableRng, +}; + +use crate::Multihash; +use arbitrary::{size_hint, Unstructured}; + +/// Generates a random valid multihash. +impl quickcheck::Arbitrary for Multihash { + fn arbitrary(g: &mut Gen) -> Multihash { + // In real world lower multihash codes are more likely to happen, hence distribute them + // with bias towards smaller values. + let weights = [128, 64, 32, 16, 8, 4, 2, 1]; + let dist = WeightedIndex::new(weights.iter()).unwrap(); + let mut rng = rand::rngs::SmallRng::seed_from_u64(u64::arbitrary(g)); + let code = match dist.sample(&mut rng) { + 0 => rng.gen_range(0..u64::pow(2, 7)), + 1 => rng.gen_range(u64::pow(2, 7)..u64::pow(2, 14)), + 2 => rng.gen_range(u64::pow(2, 14)..u64::pow(2, 21)), + 3 => rng.gen_range(u64::pow(2, 21)..u64::pow(2, 28)), + 4 => rng.gen_range(u64::pow(2, 28)..u64::pow(2, 35)), + 5 => rng.gen_range(u64::pow(2, 35)..u64::pow(2, 42)), + 6 => rng.gen_range(u64::pow(2, 42)..u64::pow(2, 49)), + 7 => rng.gen_range(u64::pow(2, 56)..u64::pow(2, 63)), + _ => unreachable!(), + }; + + // Maximum size is S byte due to the generic. + let size = rng.gen_range(0..S); + let mut data = [0; S]; + rng.fill_bytes(&mut data); + Multihash::wrap(code, &data[..size]).unwrap() + } +} + +impl<'a, const S: usize> arbitrary::Arbitrary<'a> for Multihash { + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let mut code = 0u64; + let mut len_choice = u.arbitrary::()? 
| 1; + + while len_choice & 1 == 1 { + len_choice >>= 1; + + let x = u.arbitrary::(); + let next = code + .checked_shl(8) + .zip(x.ok()) + .map(|(next, x)| next.saturating_add(x as u64)); + + match next { + None => break, + Some(next) => code = next, + } + } + + let size = u.int_in_range(0..=S)?; + let data = u.bytes(size)?; + + Ok(Multihash::wrap(code, data).unwrap()) + } + + fn size_hint(depth: usize) -> (usize, Option) { + size_hint::and(<[u8; 3]>::size_hint(depth), (0, Some(S + 8))) + } +} + +#[cfg(test)] +mod tests { + use crate::Multihash; + use arbitrary::{Arbitrary, Unstructured}; + + #[test] + fn arbitrary() { + let mut u = Unstructured::new(&[2, 4, 13, 5, 6, 7, 8, 9, 6]); + + let mh = as Arbitrary>::arbitrary(&mut u).unwrap(); + let mh2 = Multihash::<16>::wrap(1037, &[6, 7, 8, 9, 6]).unwrap(); + assert_eq!(mh, mh2); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/error.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/error.rs new file mode 100644 index 000000000000..8dbc6abf57e2 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/error.rs @@ -0,0 +1,96 @@ +#[cfg(not(feature = "std"))] +use core2::{error::Error as StdError, io}; +#[cfg(feature = "std")] +use std::{error::Error as StdError, io}; + +use unsigned_varint::decode; + +/// Opaque error struct for operations involving a [`Multihash`](crate::Multihash). +#[derive(Debug)] +pub struct Error { + kind: Kind, +} + +impl Error { + pub(crate) const fn invalid_size(size: u64) -> Self { + Self { + kind: Kind::InvalidSize(size), + } + } + + #[cfg(not(feature = "std"))] + pub(crate) const fn insufficient_varint_bytes() -> Self { + Self { + kind: Kind::Varint(decode::Error::Insufficient), + } + } + + #[cfg(not(feature = "std"))] + pub(crate) const fn varint_overflow() -> Self { + Self { + kind: Kind::Varint(decode::Error::Overflow), + } + } +} + +impl core::fmt::Display for Error { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + self.kind.fmt(f) + } +} + +#[derive(Debug)] +enum Kind { + /// Io error. + Io(io::Error), + /// Invalid multihash size. + InvalidSize(u64), + /// Invalid varint. + Varint(decode::Error), +} + +#[cfg(feature = "std")] +pub(crate) fn unsigned_varint_to_multihash_error(err: unsigned_varint::io::ReadError) -> Error { + match err { + unsigned_varint::io::ReadError::Io(err) => io_to_multihash_error(err), + unsigned_varint::io::ReadError::Decode(err) => Error { + kind: Kind::Varint(err), + }, + other => io_to_multihash_error(io::Error::new(io::ErrorKind::Other, other)), + } +} + +#[cfg(not(feature = "std"))] +pub(crate) fn unsigned_varint_decode_to_multihash_error( + err: unsigned_varint::decode::Error, +) -> Error { + Error { + kind: Kind::Varint(err), + } +} + +pub(crate) fn io_to_multihash_error(err: io::Error) -> Error { + Error { + kind: Kind::Io(err), + } +} + +impl core::fmt::Display for Kind { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + match self { + Self::Io(err) => write!(f, "{err}"), + Self::InvalidSize(size) => write!(f, "Invalid multihash size {size}."), + Self::Varint(err) => write!(f, "{err}"), + } + } +} + +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match &self.kind { + Kind::Io(inner) => Some(inner), + Kind::InvalidSize(_) => None, + Kind::Varint(_) => None, // FIXME: Does not implement `core2::Error`. 
+ } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/lib.rs new file mode 100644 index 000000000000..1f88f21c9d79 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/lib.rs @@ -0,0 +1,44 @@ +//! Bare-minimum multihash data structure. +//! +//! This crate defines a `no_std` compatible data structures for representing a `Multihash`. +//! +//! It does not offer any hashing, instead you are encouraged to either do the hashing yourself. +//! Alternatively, you can use an existing code table or make your own code table. +//! +//! The [`multihash-codetable`] crate defines a set of hashes to get started quickly. +//! To make your own codetable, use the [`multihash-derive`] crate. +//! +//! The `arb` feature flag enables the quickcheck arbitrary implementation for property based +//! testing. +//! +//! For serializing the multihash there is support for [Serde] via the `serde-codec` feature and +//! the [SCALE Codec] via the `scale-codec` feature. +//! +//! [Serde]: https://serde.rs +//! [SCALE Codec]: https://github.com/paritytech/parity-scale-codec +//! [`multihash-derive`]: https://docs.rs/multihash-derive +//! [`multihash-codetable`]: https://docs.rs/multihash-codetable + +#![deny(missing_docs, unsafe_code)] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(feature = "alloc")] +extern crate alloc; + +#[cfg(any(test, feature = "arb"))] +mod arb; +mod error; +mod multihash; +#[cfg(feature = "serde")] +mod serde; + +/// Multihash result. +#[deprecated(note = "Use `Result` instead")] +pub type Result = core::result::Result; + +pub use crate::error::Error; +pub use crate::multihash::Multihash; + +/// Deprecated type-alias for the [`Multihash`] type. +#[deprecated(since = "0.18.0", note = "Use `multihash::Multihash instead.")] +pub type MultihashGeneric = Multihash; diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/multihash.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/multihash.rs new file mode 100644 index 000000000000..a18a6f21f544 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/multihash.rs @@ -0,0 +1,355 @@ +use crate::Error; +#[cfg(feature = "alloc")] +use alloc::vec::Vec; + +use core::convert::TryInto; +use core::fmt::Debug; + +use unsigned_varint::encode as varint_encode; + +#[cfg(feature = "std")] +use std::io; + +#[cfg(not(feature = "std"))] +use core2::io; + +/// A Multihash instance that only supports the basic functionality and no hashing. +/// +/// With this Multihash implementation you can operate on Multihashes in a generic way, but +/// no hasher implementation is associated with the code. +/// +/// # Example +/// +/// ``` +/// use multihash::Multihash; +/// +/// const Sha3_256: u64 = 0x16; +/// let digest_bytes = [ +/// 0x16, 0x20, 0x64, 0x4b, 0xcc, 0x7e, 0x56, 0x43, 0x73, 0x04, 0x09, 0x99, 0xaa, 0xc8, 0x9e, +/// 0x76, 0x22, 0xf3, 0xca, 0x71, 0xfb, 0xa1, 0xd9, 0x72, 0xfd, 0x94, 0xa3, 0x1c, 0x3b, 0xfb, +/// 0xf2, 0x4e, 0x39, 0x38, +/// ]; +/// let mh = Multihash::<32>::from_bytes(&digest_bytes).unwrap(); +/// assert_eq!(mh.code(), Sha3_256); +/// assert_eq!(mh.size(), 32); +/// assert_eq!(mh.digest(), &digest_bytes[2..]); +/// ``` +#[derive(Clone, Copy, Debug, Eq, Ord, PartialOrd)] +pub struct Multihash { + /// The code of the Multihash. + code: u64, + /// The actual size of the digest in bytes (not the allocated size). + size: u8, + /// The digest. 
+ digest: [u8; S], +} + +impl Default for Multihash { + fn default() -> Self { + Self { + code: 0, + size: 0, + digest: [0; S], + } + } +} + +impl Multihash { + /// Wraps the digest in a multihash. + pub const fn wrap(code: u64, input_digest: &[u8]) -> Result { + if input_digest.len() > S { + return Err(Error::invalid_size(input_digest.len() as _)); + } + let size = input_digest.len(); + let mut digest = [0; S]; + let mut i = 0; + while i < size { + digest[i] = input_digest[i]; + i += 1; + } + Ok(Self { + code, + size: size as u8, + digest, + }) + } + + /// Returns the code of the multihash. + pub const fn code(&self) -> u64 { + self.code + } + + /// Returns the size of the digest. + pub const fn size(&self) -> u8 { + self.size + } + + /// Returns the digest. + pub fn digest(&self) -> &[u8] { + &self.digest[..self.size as usize] + } + + /// Reads a multihash from a byte stream. + pub fn read(r: R) -> Result + where + Self: Sized, + { + let (code, size, digest) = read_multihash(r)?; + Ok(Self { code, size, digest }) + } + + /// Parses a multihash from a bytes. + /// + /// You need to make sure the passed in bytes have the correct length. The digest length + /// needs to match the `size` value of the multihash. + pub fn from_bytes(mut bytes: &[u8]) -> Result + where + Self: Sized, + { + let result = Self::read(&mut bytes)?; + // There were more bytes supplied than read + if !bytes.is_empty() { + return Err(Error::invalid_size(bytes.len().try_into().expect( + "Currently the maximum size is 255, therefore always fits into usize", + ))); + } + + Ok(result) + } + + /// Writes a multihash to a byte stream, returning the written size. + pub fn write(&self, w: W) -> Result { + write_multihash(w, self.code(), self.size(), self.digest()) + } + + /// Returns the length in bytes needed to encode this multihash into bytes. + pub fn encoded_len(&self) -> usize { + let mut code_buf = varint_encode::u64_buffer(); + let code = varint_encode::u64(self.code, &mut code_buf); + + let mut size_buf = varint_encode::u8_buffer(); + let size = varint_encode::u8(self.size, &mut size_buf); + + code.len() + size.len() + usize::from(self.size) + } + + #[cfg(feature = "alloc")] + /// Returns the bytes of a multihash. + pub fn to_bytes(&self) -> Vec { + let mut bytes = Vec::with_capacity(self.size().into()); + let written = self + .write(&mut bytes) + .expect("writing to a vec should never fail"); + debug_assert_eq!(written, bytes.len()); + bytes + } + + /// Truncates the multihash to the given size. It's up to the caller to ensure that the new size + /// is secure (cryptographically) to use. + /// + /// If the new size is larger than the current size, this method does nothing. + pub fn truncate(&self, size: u8) -> Self { + let mut mh = *self; + mh.size = mh.size.min(size); + mh + } + + /// Resizes the backing multihash buffer. + /// + /// This function fails if the hash digest is larger than the target size. + pub fn resize(&self) -> Result, Error> { + let size = self.size as usize; + if size > R { + return Err(Error::invalid_size(self.size as u64)); + } + let mut mh = Multihash { + code: self.code, + size: self.size, + digest: [0; R], + }; + mh.digest[..size].copy_from_slice(&self.digest[..size]); + Ok(mh) + } + + /// Decomposes struct, useful when needing a `Sized` array or moving all the data into another type + /// + /// It is recommended to use `digest()` `code()` and `size()` for most cases. 
+    pub fn into_inner(self) -> (u64, [u8; S], u8) {
+        let Self { code, digest, size } = self;
+        (code, digest, size)
+    }
+}
+
+// Don't hash the whole allocated space, but just the actual digest
+#[allow(clippy::derived_hash_with_manual_eq)]
+impl<const S: usize> core::hash::Hash for Multihash<S> {
+    fn hash<T: core::hash::Hasher>(&self, state: &mut T) {
+        self.code.hash(state);
+        self.digest().hash(state);
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl<const S: usize> From<Multihash<S>> for Vec<u8> {
+    fn from(multihash: Multihash<S>) -> Self {
+        multihash.to_bytes()
+    }
+}
+
+impl<const A: usize, const B: usize> PartialEq<Multihash<B>> for Multihash<A> {
+    fn eq(&self, other: &Multihash<B>) -> bool {
+        // NOTE: there's no need to explicitly check the sizes, that's implicit in the digest.
+        self.code == other.code && self.digest() == other.digest()
+    }
+}
+
+#[cfg(feature = "scale-codec")]
+impl<const S: usize> parity_scale_codec::Encode for Multihash<S> {
+    fn encode_to<EncOut: parity_scale_codec::Output + ?Sized>(&self, dest: &mut EncOut) {
+        self.code.encode_to(dest);
+        self.size.encode_to(dest);
+        // **NOTE** We write the digest directly to dest, since we have known the size of digest.
+        //
+        // We do not choose to encode &[u8] directly, because it will add extra bytes (the compact length of digest).
+        // For a valid multihash, the length of digest must equal to `size`.
+        // Therefore, we can only read raw bytes whose length is equal to `size` when decoding.
+        dest.write(self.digest());
+    }
+}
+
+#[cfg(feature = "scale-codec")]
+impl<const S: usize> parity_scale_codec::EncodeLike for Multihash<S> {}
+
+#[cfg(feature = "scale-codec")]
+impl<const S: usize> parity_scale_codec::Decode for Multihash<S> {
+    fn decode<DecIn: parity_scale_codec::Input>(
+        input: &mut DecIn,
+    ) -> Result<Self, parity_scale_codec::Error> {
+        let mut mh = Multihash {
+            code: parity_scale_codec::Decode::decode(input)?,
+            size: parity_scale_codec::Decode::decode(input)?,
+            digest: [0; S],
+        };
+        if mh.size as usize > S {
+            return Err(parity_scale_codec::Error::from("invalid size"));
+        }
+        // For a valid multihash, the length of digest must equal to the size.
+        input.read(&mut mh.digest[..mh.size as usize])?;
+        Ok(mh)
+    }
+}
+
+/// Writes the multihash to a byte stream.
+fn write_multihash<W>(mut w: W, code: u64, size: u8, digest: &[u8]) -> Result<usize, Error>
+where
+    W: io::Write,
+{
+    let mut code_buf = varint_encode::u64_buffer();
+    let code = varint_encode::u64(code, &mut code_buf);
+
+    let mut size_buf = varint_encode::u8_buffer();
+    let size = varint_encode::u8(size, &mut size_buf);
+
+    let written = code.len() + size.len() + digest.len();
+
+    w.write_all(code)
+        .map_err(crate::error::io_to_multihash_error)?;
+    w.write_all(size)
+        .map_err(crate::error::io_to_multihash_error)?;
+    w.write_all(digest)
+        .map_err(crate::error::io_to_multihash_error)?;
+
+    Ok(written)
+}
+
+/// Reads a multihash from a byte stream that contains a full multihash (code, size and the digest)
+///
+/// Returns the code, size and the digest. The size is the actual size and not the
+/// maximum/allocated size of the digest.
+///
+/// Currently the maximum size for a digest is 255 bytes.
+fn read_multihash<R, const S: usize>(mut r: R) -> Result<(u64, u8, [u8; S]), Error>
+where
+    R: io::Read,
+{
+    let code = read_u64(&mut r)?;
+    let size = read_u64(&mut r)?;
+
+    if size > S as u64 || size > u8::MAX as u64 {
+        return Err(Error::invalid_size(size));
+    }
+
+    let mut digest = [0; S];
+    r.read_exact(&mut digest[..size as usize])
+        .map_err(crate::error::io_to_multihash_error)?;
+    Ok((code, size as u8, digest))
+}
+
+#[cfg(feature = "std")]
+pub(crate) fn read_u64<R: io::Read>(r: R) -> Result<u64, Error> {
+    unsigned_varint::io::read_u64(r).map_err(crate::error::unsigned_varint_to_multihash_error)
+}
+
+/// Reads 64 bits from a byte array into a u64
+/// Adapted from unsigned-varint's generated read_u64 function at
+/// https://github.com/paritytech/unsigned-varint/blob/master/src/io.rs
+#[cfg(not(feature = "std"))]
+pub(crate) fn read_u64<R: io::Read>(mut r: R) -> Result<u64, Error> {
+    use unsigned_varint::decode;
+    let mut b = varint_encode::u64_buffer();
+    for i in 0..b.len() {
+        let n = r
+            .read(&mut (b[i..i + 1]))
+            .map_err(crate::error::io_to_multihash_error)?;
+        if n == 0 {
+            return Err(Error::insufficient_varint_bytes());
+        } else if decode::is_last(b[i]) {
+            return decode::u64(&b[..=i])
+                .map(|decoded| decoded.0)
+                .map_err(crate::error::unsigned_varint_decode_to_multihash_error);
+        }
+    }
+    Err(Error::varint_overflow())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    #[cfg(feature = "scale-codec")]
+    fn test_scale() {
+        use parity_scale_codec::{Decode, Encode};
+
+        let mh1 = Multihash::<32>::wrap(0, b"hello world").unwrap();
+        // println!("mh1: code = {}, size = {}, digest = {:?}", mh1.code(), mh1.size(), mh1.digest());
+        let mh1_bytes = mh1.encode();
+        // println!("Multihash<32>: {}", hex::encode(&mh1_bytes));
+        let mh2: Multihash<32> = Decode::decode(&mut &mh1_bytes[..]).unwrap();
+        assert_eq!(mh1, mh2);
+
+        let mh3 = Multihash::<64>::wrap(0, b"hello world").unwrap();
+        // println!("mh3: code = {}, size = {}, digest = {:?}", mh3.code(), mh3.size(), mh3.digest());
+        let mh3_bytes = mh3.encode();
+        // println!("Multihash<64>: {}", hex::encode(&mh3_bytes));
+        let mh4: Multihash<64> = Decode::decode(&mut &mh3_bytes[..]).unwrap();
+        assert_eq!(mh3, mh4);
+
+        assert_eq!(mh1_bytes, mh3_bytes);
+    }
+
+    #[test]
+    fn test_eq_sizes() {
+        let mh1 = Multihash::<32>::default();
+        let mh2 = Multihash::<64>::default();
+        assert_eq!(mh1, mh2);
+    }
+
+    #[test]
+    fn decode_non_minimal_error() {
+        // This is a non-minimal varint.
+        let data = [241, 0, 0, 0, 0, 0, 128, 132, 132, 132, 58];
+        let result = read_u64(&data[..]);
+        assert!(result.is_err());
+    }
+}
diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/serde.rs b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/serde.rs
new file mode 100644
index 000000000000..65709a315d49
--- /dev/null
+++ b/third_party/rust/chromium_crates_io/vendor/multihash-0.19.2/src/serde.rs
@@ -0,0 +1,250 @@
+//! Multihash Serde (de)serialization
+
+use core::{fmt, mem, slice};
+
+use serde::{
+    de::{self, SeqAccess, Visitor},
+    ser, Deserialize, Deserializer, Serialize, Serializer,
+};
+
+use crate::Multihash;
+
+/// The maximum serialization size of `code` is 9 bytes (a large varint encoded u64) and for `size`
+/// is 2 bytes (a large varint encoded u8), this makes a total of 11 bytes.
+const MAXIMUM_PREFIX_SIZE: usize = 11;
+
+/// The is currently no way to allocate an array that is some constant size bigger then a given
+/// const generic. Once `generic_const_exprs` are a thing, this struct will no longer be needed.
+/// Until then we introduce a hack. We allocate a struct, which contains two independent arrays,
+/// which can be specified with const generics. We then treat the whole struct as a slice of
+/// continuous memory.
+#[repr(C, packed)]
+struct Buffer<const SIZE_FIRST: usize, const SIZE_SECOND: usize> {
+    first: [u8; SIZE_FIRST],
+    second: [u8; SIZE_SECOND],
+}
+
+#[allow(unsafe_code)]
+impl<const SIZE_FIRST: usize, const SIZE_SECOND: usize> Buffer<SIZE_FIRST, SIZE_SECOND> {
+    fn new() -> Self {
+        Self {
+            first: [0; SIZE_FIRST],
+            second: [0; SIZE_SECOND],
+        }
+    }
+
+    fn as_slice(&self) -> &[u8] {
+        unsafe { slice::from_raw_parts(self as *const _ as _, mem::size_of::<Self>()) }
+    }
+
+    fn as_mut_slice(&mut self) -> &mut [u8] {
+        unsafe { slice::from_raw_parts_mut(self as *mut _ as _, mem::size_of::<Self>()) }
+    }
+}
+
+impl<const SIZE: usize> Serialize for Multihash<SIZE> {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut buffer = Buffer::<MAXIMUM_PREFIX_SIZE, SIZE>::new();
+        let bytes_written = self
+            .write(buffer.as_mut_slice())
+            .map_err(|_| ser::Error::custom("Failed to serialize Multihash"))?;
+        serializer.serialize_bytes(&buffer.as_slice()[..bytes_written])
+    }
+}
+
+struct BytesVisitor<const SIZE: usize>;
+
+impl<'de, const SIZE: usize> Visitor<'de> for BytesVisitor<SIZE> {
+    type Value = Multihash<SIZE>;
+
+    fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        write!(fmt, "a valid Multihash in bytes")
+    }
+
+    fn visit_bytes<E>(self, bytes: &[u8]) -> Result<Self::Value, E>
+    where
+        E: de::Error,
+    {
+        Multihash::<SIZE>::from_bytes(bytes)
+            .map_err(|_| de::Error::custom("Failed to deserialize Multihash"))
+    }
+
+    // Some Serde data formats interpret a byte stream as a sequence of bytes (e.g. `serde_json`).
+    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+    where
+        A: SeqAccess<'de>,
+    {
+        let mut buffer = Buffer::<MAXIMUM_PREFIX_SIZE, SIZE>::new();
+        let bytes = buffer.as_mut_slice();
+
+        // Fill the bytes slices with the given sequence
+        let mut pos = 0;
+        while let Some(byte) = seq.next_element()? {
+            bytes[pos] = byte;
+            pos += 1;
+            if pos >= bytes.len() {
+                return Err(de::Error::custom("Failed to deserialize Multihash"));
+            }
+        }
+
+        Multihash::<SIZE>::from_bytes(&bytes[..pos])
+            .map_err(|_| de::Error::custom("Failed to deserialize Multihash"))
+    }
+}
+
+impl<'de, const SIZE: usize> Deserialize<'de> for Multihash<SIZE> {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        deserializer.deserialize_bytes(BytesVisitor)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    use std::ptr;
+
+    use serde_test::{assert_tokens, Token};
+
+    const SHA2_256_CODE: u64 = 0x12;
+    const DIGEST: [u8; 32] = [
+        159, 228, 204, 198, 222, 22, 114, 79, 58, 48, 199, 232, 242, 84, 243, 198, 71, 25, 134,
+        172, 177, 248, 216, 207, 142, 150, 206, 42, 215, 219, 231, 251,
+    ];
+
+    #[test]
+    fn test_serde_json() {
+        // This is a concatenation of `SHA2_256_CODE + DIGEST_LENGTH + DIGEST`.
+        let expected_json = format!("[{},{},159,228,204,198,222,22,114,79,58,48,199,232,242,84,243,198,71,25,134,172,177,248,216,207,142,150,206,42,215,219,231,251]", SHA2_256_CODE as u8, DIGEST.len() as u8);
+
+        let mh = Multihash::<32>::wrap(SHA2_256_CODE, &DIGEST).unwrap();
+
+        let json = serde_json::to_string(&mh).unwrap();
+        assert_eq!(json, expected_json);
+
+        let mh_decoded: Multihash<32> = serde_json::from_str(&json).unwrap();
+        assert_eq!(mh, mh_decoded);
+    }
+
+    #[test]
+    fn test_serde_test() {
+        // This is a concatenation of `SHA2_256_CODE + DIGEST_LENGTH + DIGEST`.
+        const ENCODED_MULTIHASH_BYTES: [u8; 34] = [
+            SHA2_256_CODE as u8,
+            DIGEST.len() as u8,
+            159,
+            228,
+            204,
+            198,
+            222,
+            22,
+            114,
+            79,
+            58,
+            48,
+            199,
+            232,
+            242,
+            84,
+            243,
+            198,
+            71,
+            25,
+            134,
+            172,
+            177,
+            248,
+            216,
+            207,
+            142,
+            150,
+            206,
+            42,
+            215,
+            219,
+            231,
+            251,
+        ];
+
+        let mh = Multihash::<32>::wrap(SHA2_256_CODE, &DIGEST).unwrap();
+
+        // As bytes.
+        assert_tokens(&mh, &[Token::Bytes(&ENCODED_MULTIHASH_BYTES)]);
+
+        // As sequence.
+        serde_test::assert_de_tokens(
+            &mh,
+            &[
+                Token::Seq { len: Some(34) },
+                Token::U8(SHA2_256_CODE as u8),
+                Token::U8(DIGEST.len() as u8),
+                Token::U8(159),
+                Token::U8(228),
+                Token::U8(204),
+                Token::U8(198),
+                Token::U8(222),
+                Token::U8(22),
+                Token::U8(114),
+                Token::U8(79),
+                Token::U8(58),
+                Token::U8(48),
+                Token::U8(199),
+                Token::U8(232),
+                Token::U8(242),
+                Token::U8(84),
+                Token::U8(243),
+                Token::U8(198),
+                Token::U8(71),
+                Token::U8(25),
+                Token::U8(134),
+                Token::U8(172),
+                Token::U8(177),
+                Token::U8(248),
+                Token::U8(216),
+                Token::U8(207),
+                Token::U8(142),
+                Token::U8(150),
+                Token::U8(206),
+                Token::U8(42),
+                Token::U8(215),
+                Token::U8(219),
+                Token::U8(231),
+                Token::U8(251),
+                Token::SeqEnd,
+            ],
+        );
+    }
+
+    #[test]
+    fn test_buffer_alignment() {
+        const SIZE_FIRST: usize = 11;
+        const SIZE_SECOND: usize = 13;
+        let buffer = Buffer::<SIZE_FIRST, SIZE_SECOND>::new();
+
+        // Make sure that the struct allocated continuous memory, as we exploit that fact with the
+        // `as_slice` and `as_mut_slice()` methods.
+        let start_first = ptr::addr_of!(buffer.first) as *const u8;
+        let start_second = ptr::addr_of!(buffer.second) as *const u8;
+        #[allow(unsafe_code)]
+        unsafe {
+            assert_eq!(start_second.offset_from(start_first), SIZE_FIRST as isize);
+        };
+    }
+
+    #[test]
+    fn test_buffer() {
+        const SIZE_FIRST: usize = 3;
+        const SIZE_SECOND: usize = 8;
+        let mut buffer = Buffer::<SIZE_FIRST, SIZE_SECOND>::new();
+
+        let data: [u8; SIZE_FIRST + SIZE_SECOND] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+        buffer.as_mut_slice().copy_from_slice(&data);
+        assert_eq!(buffer.as_slice(), data);
+    }
+}
diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/.cargo-checksum.json
similarity index 100%
rename from third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.cargo-checksum.json
rename to third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/.cargo-checksum.json
diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/.cargo_vcs_info.json
new file mode 100644
index 000000000000..0bcf1234bbfd
--- /dev/null
+++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/.cargo_vcs_info.json
@@ -0,0 +1,6 @@
+{
+  "git": {
+    "sha1": "4c0ef5268355308d7f083482dad1c81318db4f6b"
+  },
+  "path_in_vcs": "codetable"
+}
\ No newline at end of file
diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.lock b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.lock
new file mode 100644
index 000000000000..b4761acd214a
--- /dev/null
+++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.lock
@@ -0,0 +1,1079 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "blake2b_simd" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "blake2s_simd" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "blake3" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bbb537bb4a30b90362caddba8f360c0a56bc13d3a5570028e7197204cb54a17" +dependencies = [ + 
"shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "4.5.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + +[[package]] +name = "cpufeatures" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +dependencies = [ + "libc", +] + +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" 
+version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "derive_arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "indexmap" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "is-terminal" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "libc" +version = "0.2.159" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "multihash" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +dependencies = [ + "core2", + "unsigned-varint", +] + +[[package]] +name = "multihash-codetable" +version = "0.1.4" +dependencies = [ + "arbitrary", + "blake2b_simd", + "blake2s_simd", + "blake3", + "core2", + "criterion", + "digest", + "hex", + "multihash-derive", + "rand", + "ripemd", + "serde", + "sha1", + "sha2", + "sha3", + "strobe-rs", + "unsigned-varint", +] + +[[package]] +name = "multihash-derive" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f1b7edab35d920890b88643a765fc9bd295cf0201f4154dda231bef9b8404eb" +dependencies = [ + "core2", + "multihash", + "multihash-derive-impl", +] + +[[package]] +name = "multihash-derive-impl" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3dc7141bd06405929948754f0628d247f5ca1865be745099205e5086da957cb" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "oorandom" +version = "11.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" + +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "portable-atomic" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro-crate" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "regex" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest", +] + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "serde" +version = "1.0.210" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.210" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.128" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "strobe-rs" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98fe17535ea31344936cc58d29fec9b500b0452ddc4cc24c429c8a921a0e84e5" +dependencies = [ + "bitflags", + "byteorder", + "keccak", + "subtle", + "zeroize", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" + +[[package]] +name = "toml_edit" +version = "0.22.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-ident" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" + +[[package]] +name = "web-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" 
+dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +dependencies = [ + "memchr", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.toml new file mode 100644 index 000000000000..608545a564eb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.toml @@ -0,0 +1,195 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +name = "multihash-codetable" +version = "0.1.4" +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Default multihash code-table with cryptographically secure hash implementations" +readme = false +license = "MIT" +repository = "https://github.com/multiformats/rust-multihash" + +[package.metadata.docs.rs] +features = [ + "std", + "sha1", + "sha2", + "sha3", + "ripemd", + "strobe", + "blake2b", + "blake2s", + "blake3", + "serde", +] +rustdoc-args = [ + "--cfg", + "docs_rs", +] + +[lib] +name = "multihash_codetable" +path = "src/lib.rs" + +[[example]] +name = "custom_table" +path = "examples/custom_table.rs" +required-features = [ + "blake2b", + "sha2", +] + +[[example]] +name = "manual_mh" +path = "examples/manual_mh.rs" +required-features = ["sha2"] + +[[test]] +name = "lib" +path = "tests/lib.rs" +required-features = [ + "sha1", + "sha2", + "sha3", + "ripemd", + "strobe", + "blake2b", + "blake2s", + "blake3", +] + +[[bench]] +name = "multihash" +path = "benches/multihash.rs" +harness = false + +[dependencies.arbitrary] +version = "1.3.2" +features = ["derive"] +optional = true + +[dependencies.blake2b_simd] +version = "1.0.0" +optional = true +default-features = false + +[dependencies.blake2s_simd] +version = "1.0.0" +optional = true +default-features = false + +[dependencies.blake3] +version = "1.2.0" +optional = true +default-features = false + +[dependencies.core2] +version = "0.4.0" +default-features = false + +[dependencies.digest] +version = "0.10.1" +optional = true +default-features = false + +[dependencies.multihash-derive] +version = "0.9.1" +default-features = false + +[dependencies.ripemd] +version = "0.1.1" +optional = true +default-features = false + +[dependencies.serde] +version = "1.0.158" +features = ["derive"] +optional = true +default-features = false + +[dependencies.sha1] +version = "0.10.5" +optional = true +default-features = false + +[dependencies.sha2] +version = "0.10.0" +optional = true +default-features = false + +[dependencies.sha3] +version = 
"0.10.0" +optional = true +default-features = false + +[dependencies.strobe-rs] +version = "0.10.0" +optional = true +default-features = false + +[dev-dependencies.criterion] +version = "0.5.1" + +[dev-dependencies.hex] +version = "0.4.2" + +[dev-dependencies.rand] +version = "0.8.5" + +[dev-dependencies.unsigned-varint] +version = "0.8.0" +default-features = false + +[features] +arb = [ + "dep:arbitrary", + "std", +] +blake2b = ["dep:blake2b_simd"] +blake2s = ["dep:blake2s_simd"] +blake3 = ["dep:blake3"] +default = ["std"] +ripemd = [ + "dep:digest", + "dep:ripemd", +] +sha1 = [ + "dep:digest", + "dep:sha1", +] +sha2 = [ + "dep:digest", + "dep:sha2", +] +sha3 = [ + "dep:digest", + "dep:sha3", +] +std = [ + "blake2b_simd?/std", + "blake2s_simd?/std", + "blake3?/std", + "digest?/std", + "sha1?/std", + "sha2?/std", + "sha3?/std", + "strobe-rs?/std", + "ripemd?/std", + "multihash-derive/std", + "core2/std", +] +strobe = ["dep:strobe-rs"] diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.toml.orig new file mode 100644 index 000000000000..03a470992ce2 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/Cargo.toml.orig @@ -0,0 +1,65 @@ +[package] +name = "multihash-codetable" +description = "Default multihash code-table with cryptographically secure hash implementations" +version = "0.1.4" +repository = "https://github.com/multiformats/rust-multihash" +license = "MIT" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +default = ["std"] +std = ["blake2b_simd?/std", "blake2s_simd?/std", "blake3?/std", "digest?/std", "sha1?/std", "sha2?/std", "sha3?/std", "strobe-rs?/std", "ripemd?/std", "multihash-derive/std", "core2/std"] +arb = ["dep:arbitrary", "std"] +sha1 = ["dep:digest", "dep:sha1"] +sha2 = ["dep:digest", "dep:sha2"] +sha3 = ["dep:digest", "dep:sha3"] +ripemd = ["dep:digest", "dep:ripemd"] +strobe = ["dep:strobe-rs"] +blake2b = ["dep:blake2b_simd"] +blake2s = ["dep:blake2s_simd"] +blake3 = ["dep:blake3"] + +[dependencies] +blake2b_simd = { version = "1.0.0", default-features = false, optional = true } +blake2s_simd = { version = "1.0.0", default-features = false, optional = true } +blake3 = { version = "1.2.0", default-features = false, optional = true } +digest = { version = "0.10.1", default-features = false, optional = true } +sha1 = { version = "0.10.5", default-features = false, optional = true } +sha2 = { version = "0.10.0", default-features = false, optional = true } +sha3 = { version = "0.10.0", default-features = false, optional = true } +strobe-rs = { version = "0.10.0", default-features = false, optional = true } +ripemd = { version = "0.1.1", default-features = false, optional = true } +multihash-derive = { version = "0.9.1", path = "../derive", default-features = false } +core2 = { version = "0.4.0", default-features = false } +serde = { version = "1.0.158", features = ["derive"], default-features = false, optional = true } +arbitrary = { version = "1.3.2", optional = true, features = ["derive"] } + +[dev-dependencies] +hex = "0.4.2" +unsigned-varint = { version = "0.8.0", default-features = false } +criterion = "0.5.1" +rand = "0.8.5" + +[[bench]] +name = "multihash" +harness = false + +[[test]] +name = "lib" +required-features = ["sha1", "sha2", "sha3", "ripemd", "strobe", "blake2b", "blake2s", "blake3"] + +[[example]] +name = 
"custom_table" +path = "examples/custom_table.rs" +required-features = ["blake2b", "sha2"] + +[[example]] +name = "manual_mh" +path = "examples/manual_mh.rs" +required-features = ["sha2"] + +[package.metadata.docs.rs] +features = ["std", "sha1", "sha2", "sha3", "ripemd", "strobe", "blake2b", "blake2s", "blake3", "serde"] +rustdoc-args = ["--cfg", "docs_rs"] diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/LICENSE b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/LICENSE new file mode 100644 index 000000000000..233fd7bd7d5a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (C) 2015-2016 Friedel Ziegelmayer + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Status API Training Shop Blog About Pricing diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/benches/multihash.rs b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/benches/multihash.rs new file mode 100644 index 000000000000..735926acb957 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/benches/multihash.rs @@ -0,0 +1,99 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use rand::Rng; + +use multihash_codetable::{ + Blake2b256, Blake2b512, Blake2s128, Blake2s256, Blake3_256, Keccak224, Keccak256, Keccak384, + Keccak512, Sha1, Sha2_256, Sha2_512, Sha3_224, Sha3_256, Sha3_384, Sha3_512, Strobe256, + Strobe512, +}; +use multihash_derive::Hasher; + +macro_rules! group_digest { + ($criterion:ident, $( $id:expr => $hash:ident, $input:expr)* ) => {{ + let mut group = $criterion.benchmark_group("digest"); + $( + group.bench_function($id, |b| { + b.iter(|| { + let mut hasher = $hash::default(); + hasher.update(black_box($input)); + let _ = black_box(hasher.finalize()); + }) + }); + )* + group.finish(); + }}; +} + +macro_rules! 
group_stream {
+    ($criterion:ident, $( $id:expr => $hash:ident, $input:expr)* ) => {{
+        let mut group = $criterion.benchmark_group("stream");
+        $(
+            group.bench_function($id, |b| {
+                b.iter(|| {
+                    let input = black_box($input);
+                    let mut hasher = <$hash>::default();
+                    for i in 0..3 {
+                        let start = i * 256;
+                        hasher.update(&input[start..(start + 256)]);
+                    }
+                    let _ = black_box(hasher.finalize());
+                })
+            });
+        )*
+        group.finish();
+    }};
+}
+
+fn bench_digest(c: &mut Criterion) {
+    let mut rng = rand::thread_rng();
+    let data: Vec<u8> = (0..1024).map(|_| rng.gen()).collect();
+    group_digest!(c,
+        "sha1" => Sha1, &data
+        "sha2_256" => Sha2_256, &data
+        "sha2_512" => Sha2_512, &data
+        "sha3_224" => Sha3_224, &data
+        "sha3_256" => Sha3_256, &data
+        "sha3_384" => Sha3_384, &data
+        "sha3_512" => Sha3_512, &data
+        "keccak_224" => Keccak224, &data
+        "keccak_256" => Keccak256, &data
+        "keccak_384" => Keccak384, &data
+        "keccak_512" => Keccak512, &data
+        "blake2b_256" => Blake2b256, &data
+        "blake2b_512" => Blake2b512, &data
+        "blake2s_128" => Blake2s128, &data
+        "blake2s_256" => Blake2s256, &data
+        "blake3_256" => Blake3_256, &data
+        "strobe_256" => Strobe256, &data
+        "strobe_512" => Strobe512, &data
+    );
+}
+
+/// Chunks the data into 256-byte slices.
+fn bench_stream(c: &mut Criterion) {
+    let mut rng = rand::thread_rng();
+    let data: Vec<u8> = (0..1024).map(|_| rng.gen()).collect();
+    group_stream!(c,
+        "sha1" => Sha1, &data
+        "sha2_256" => Sha2_256, &data
+        "sha2_512" => Sha2_512, &data
+        "sha3_224" => Sha3_224, &data
+        "sha3_256" => Sha3_256, &data
+        "sha3_384" => Sha3_384, &data
+        "sha3_512" => Sha3_512, &data
+        "keccak_224" => Keccak224, &data
+        "keccak_256" => Keccak256, &data
+        "keccak_384" => Keccak384, &data
+        "keccak_512" => Keccak512, &data
+        "blake2b_256" => Blake2b256, &data
+        "blake2b_512" => Blake2b512, &data
+        "blake2s_128" => Blake2s128, &data
+        "blake2s_256" => Blake2s256, &data
+        "blake3_256" => Blake3_256, &data
+        "strobe_256" => Strobe256, &data
+        "strobe_512" => Strobe512, &data
+    );
+}
+
+criterion_group!(benches, bench_digest, bench_stream);
+criterion_main!(benches);
diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/examples/custom_table.rs b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/examples/custom_table.rs
similarity index 84%
rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/examples/custom_table.rs
rename to third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/examples/custom_table.rs
index f96c22f5665b..40235d0df578 100644
--- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/examples/custom_table.rs
+++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/examples/custom_table.rs
@@ -1,7 +1,7 @@
 use std::convert::TryFrom;
 
-use multihash::derive::Multihash;
-use multihash::{Error, Hasher, MultihashDigest, MultihashGeneric, Sha2_256};
+use multihash_codetable::Sha2_256;
+use multihash_derive::{Hasher, MultihashDigest};
 
 // You can implement a custom hasher. This is a SHA2 256-bit hasher that returns a hash that is
 // truncated to 160 bits.
@@ -19,14 +19,14 @@ impl Hasher for Sha2_256Truncated20 {
     }
 }
 
-#[derive(Clone, Copy, Debug, Eq, Multihash, PartialEq)]
+#[derive(Clone, Copy, Debug, Eq, MultihashDigest, PartialEq)]
 #[mh(alloc_size = 64)]
 pub enum Code {
     /// Example for using a custom hasher which returns truncated hashes
     #[mh(code = 0x12, hasher = Sha2_256Truncated20)]
     Sha2_256Truncated20,
     /// Example for using a hasher with a bit size that is not exported by default
-    #[mh(code = 0xb219, hasher = multihash::Blake2bHasher::<25>)]
+    #[mh(code = 0xb219, hasher = multihash_codetable::Blake2bHasher::<25>)]
     Blake2b200,
 }
 
@@ -34,9 +34,9 @@ fn main() {
     // Create new hashes from some input data. This is done through the `Code` enum we derived
     // Multihash from.
     let blake_hash = Code::Blake2b200.digest(b"hello world!");
-    println!("{:02x?}", blake_hash);
+    println!("{blake_hash:02x?}");
     let truncated_sha2_hash = Code::Sha2_256Truncated20.digest(b"hello world!");
-    println!("{:02x?}", truncated_sha2_hash);
+    println!("{truncated_sha2_hash:02x?}");
 
     // Sometimes you might not need to hash new data, you just want to get the information about
     // a Multihash.
diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/examples/manual_mh.rs b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/examples/manual_mh.rs
similarity index 77%
rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/examples/manual_mh.rs
rename to third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/examples/manual_mh.rs
index 81db713d9fba..c3905591a69e 100644
--- a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/examples/manual_mh.rs
+++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/examples/manual_mh.rs
@@ -1,4 +1,5 @@
-use multihash::{Code, MultihashDigest};
+use multihash_codetable::Code;
+use multihash_derive::MultihashDigest;
 
 /// prefix/multihash generating tool to aid when adding new tests
 fn prefix_util() {
@@ -14,9 +15,9 @@ fn prefix_util() {
     let code_hex = hex::encode(&empty[..1]); // change if longer/shorter prefix
     let len_hex = hex::encode(len);
 
-    println!("prefix hex: code: {}, len: {}", code_hex, len_hex);
+    println!("prefix hex: code: {code_hex}, len: {len_hex}");
 
-    println!("{}{}{}", code_hex, len_hex, hash);
+    println!("{code_hex}{len_hex}{hash}");
 }
 
 fn main() {
diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/src/hasher_impl.rs b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/src/hasher_impl.rs
new file mode 100644
index 000000000000..b00c4e485cc2
--- /dev/null
+++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/src/hasher_impl.rs
@@ -0,0 +1,277 @@
+#[cfg(any(
+    feature = "strobe",
+    feature = "blake2b",
+    feature = "blake2s",
+    feature = "blake3"
+))]
+macro_rules! derive_write {
+    ($name:ident) => {
+        impl<const S: usize> core2::io::Write for $name<S> {
+            fn write(&mut self, buf: &[u8]) -> core2::io::Result<usize> {
+                use multihash_derive::Hasher as _;
+
+                self.update(buf);
+                Ok(buf.len())
+            }
+
+            fn flush(&mut self) -> core2::io::Result<()> {
+                Ok(())
+            }
+        }
+    };
+}
+
+#[cfg(any(feature = "blake2b", feature = "blake2s"))]
+macro_rules! derive_hasher_blake {
+    ($module:ident, $name:ident) => {
+        /// Multihash hasher.
+ #[derive(Debug)] + pub struct $name { + state: $module::State, + digest: [u8; S], + } + + impl Default for $name { + fn default() -> Self { + let mut params = $module::Params::new(); + params.hash_length(S); + Self { + state: params.to_state(), + digest: [0; S], + } + } + } + + impl multihash_derive::Hasher for $name { + fn update(&mut self, input: &[u8]) { + self.state.update(input); + } + + fn finalize(&mut self) -> &[u8] { + let digest = self.state.finalize(); + let digest_bytes = digest.as_bytes(); + let digest_out = &mut self.digest[..digest_bytes.len().max(S)]; + digest_out.copy_from_slice(digest_bytes); + digest_out + } + + fn reset(&mut self) { + let Self { state, .. } = Self::default(); + self.state = state; + } + } + + derive_write!($name); + }; +} + +#[cfg(feature = "blake2b")] +pub mod blake2b { + derive_hasher_blake!(blake2b_simd, Blake2bHasher); + + /// 256 bit blake2b hasher. + pub type Blake2b256 = Blake2bHasher<32>; + + /// 512 bit blake2b hasher. + pub type Blake2b512 = Blake2bHasher<64>; +} + +#[cfg(feature = "blake2s")] +pub mod blake2s { + derive_hasher_blake!(blake2s_simd, Blake2sHasher); + + /// 256 bit blake2s hasher. + pub type Blake2s128 = Blake2sHasher<16>; + + /// 512 bit blake2s hasher. + pub type Blake2s256 = Blake2sHasher<32>; +} + +#[cfg(feature = "blake3")] +pub mod blake3 { + /// Multihash hasher. + #[derive(Debug)] + pub struct Blake3Hasher { + hasher: ::blake3::Hasher, + digest: [u8; S], + } + + impl Blake3Hasher { + /// using blake3's XOF function, fills the given slice with hash output + pub fn finalize_xof_fill(&mut self, digest_out: &mut [u8]) { + let mut digest = self.hasher.finalize_xof(); + digest.fill(digest_out) + } + } + + impl Default for Blake3Hasher { + fn default() -> Self { + let hasher = ::blake3::Hasher::new(); + + Self { + hasher, + digest: [0; S], + } + } + } + + impl multihash_derive::Hasher for Blake3Hasher { + fn update(&mut self, input: &[u8]) { + self.hasher.update(input); + } + + fn finalize(&mut self) -> &[u8] { + let mut output = self.hasher.finalize_xof(); + output.fill(&mut self.digest); + &self.digest + } + + fn reset(&mut self) { + self.hasher.reset(); + } + } + + derive_write!(Blake3Hasher); + + /// blake3-256 hasher. + pub type Blake3_256 = Blake3Hasher<32>; +} + +#[cfg(any( + feature = "sha1", + feature = "sha2", + feature = "sha3", + feature = "ripemd" +))] +macro_rules! derive_rustcrypto_hasher { + ($module:ty, $name:ident, $size:expr) => { + /// Multihash hasher. 
+ #[derive(Debug)] + pub struct $name { + state: $module, + digest: [u8; $size], + } + + impl Default for $name { + fn default() -> Self { + $name { + state: Default::default(), + digest: [0; $size], + } + } + } + + impl ::multihash_derive::Hasher for $name { + fn update(&mut self, input: &[u8]) { + use digest::Digest; + self.state.update(input) + } + + fn finalize(&mut self) -> &[u8] { + use digest::Digest; + let digest = self.state.clone().finalize(); + let digest_bytes = digest.as_slice(); + let digest_out = &mut self.digest[..digest_bytes.len().max($size)]; + digest_out.copy_from_slice(digest_bytes); + digest_out + } + + fn reset(&mut self) { + use digest::Digest; + self.state.reset(); + } + } + + impl core2::io::Write for $name { + fn write(&mut self, buf: &[u8]) -> core2::io::Result { + use multihash_derive::Hasher as _; + + self.update(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> core2::io::Result<()> { + Ok(()) + } + } + }; +} + +#[cfg(feature = "sha1")] +pub mod sha1 { + derive_rustcrypto_hasher!(::sha1::Sha1, Sha1, 20); +} + +#[cfg(feature = "sha2")] +pub mod sha2 { + derive_rustcrypto_hasher!(::sha2::Sha256, Sha2_256, 32); + derive_rustcrypto_hasher!(::sha2::Sha512, Sha2_512, 64); +} + +#[cfg(feature = "sha3")] +pub mod sha3 { + derive_rustcrypto_hasher!(::sha3::Sha3_224, Sha3_224, 28); + derive_rustcrypto_hasher!(::sha3::Sha3_256, Sha3_256, 32); + derive_rustcrypto_hasher!(::sha3::Sha3_384, Sha3_384, 48); + derive_rustcrypto_hasher!(::sha3::Sha3_512, Sha3_512, 64); + + derive_rustcrypto_hasher!(::sha3::Keccak224, Keccak224, 28); + derive_rustcrypto_hasher!(::sha3::Keccak256, Keccak256, 32); + derive_rustcrypto_hasher!(::sha3::Keccak384, Keccak384, 48); + derive_rustcrypto_hasher!(::sha3::Keccak512, Keccak512, 64); +} + +#[cfg(feature = "ripemd")] +pub mod ripemd { + derive_rustcrypto_hasher!(::ripemd::Ripemd160, Ripemd160, 20); + derive_rustcrypto_hasher!(::ripemd::Ripemd256, Ripemd256, 32); + derive_rustcrypto_hasher!(::ripemd::Ripemd320, Ripemd320, 40); +} + +#[cfg(feature = "strobe")] +pub mod strobe { + use strobe_rs::{SecParam, Strobe}; + + /// Strobe hasher. + pub struct StrobeHasher { + strobe: Strobe, + initialized: bool, + digest: [u8; S], + } + + impl Default for StrobeHasher { + fn default() -> Self { + Self { + strobe: Strobe::new(b"StrobeHash", SecParam::B128), + initialized: false, + digest: [0; S], + } + } + } + + impl multihash_derive::Hasher for StrobeHasher { + fn update(&mut self, input: &[u8]) { + self.strobe.ad(input, self.initialized); + self.initialized = true; + } + + fn finalize(&mut self) -> &[u8] { + self.strobe.clone().prf(&mut self.digest, false); + &self.digest + } + + fn reset(&mut self) { + let Self { strobe, .. } = Self::default(); + self.strobe = strobe; + self.initialized = false; + } + } + + derive_write!(StrobeHasher); + + /// 256 bit strobe hasher. + pub type Strobe256 = StrobeHasher<32>; + + /// 512 bit strobe hasher. + pub type Strobe512 = StrobeHasher<64>; +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/src/lib.rs new file mode 100644 index 000000000000..893e4dd1f419 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/src/lib.rs @@ -0,0 +1,232 @@ +#![cfg_attr(feature = "arb", allow(unreachable_code))] // Otherwise the "Cargo Hack" check fails since "arb" includes no hash algos by default +#![cfg_attr(docsrs, feature(doc_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +//! 
A batteries-included code table of multihashes. +//! +//! Whilst the `multihash` crate itself only defines the data structure, this crate defines a codetable via [`multihash_derive`] for several hashers. +//! Although convenient, depending on this crate will increase the dependency footprint of your project. +//! +//! It is only recommended as a getting-started-quickly solution. +//! For production, you should either define your own codetable or rely only on the `multihash` crate itself. + +mod hasher_impl; + +pub use multihash_derive::MultihashDigest; + +#[cfg(feature = "blake2b")] +#[cfg_attr(docsrs, doc(cfg(feature = "blake2b")))] +pub use crate::hasher_impl::blake2b::{Blake2b256, Blake2b512, Blake2bHasher}; +#[cfg(feature = "blake2s")] +#[cfg_attr(docsrs, doc(cfg(feature = "blake2s")))] +pub use crate::hasher_impl::blake2s::{Blake2s128, Blake2s256, Blake2sHasher}; +#[cfg(feature = "blake3")] +#[cfg_attr(docsrs, doc(cfg(feature = "blake3")))] +pub use crate::hasher_impl::blake3::{Blake3Hasher, Blake3_256}; +#[cfg(feature = "ripemd")] +#[cfg_attr(docsrs, doc(cfg(feature = "ripemd")))] +pub use crate::hasher_impl::ripemd::{Ripemd160, Ripemd256, Ripemd320}; +#[cfg(feature = "sha1")] +#[cfg_attr(docsrs, doc(cfg(feature = "sha1")))] +pub use crate::hasher_impl::sha1::Sha1; +#[cfg(feature = "sha2")] +#[cfg_attr(docsrs, doc(cfg(feature = "sha2")))] +pub use crate::hasher_impl::sha2::{Sha2_256, Sha2_512}; +#[cfg(feature = "sha3")] +#[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] +pub use crate::hasher_impl::sha3::{ + Keccak224, Keccak256, Keccak384, Keccak512, Sha3_224, Sha3_256, Sha3_384, Sha3_512, +}; +#[cfg(feature = "strobe")] +#[cfg_attr(docsrs, doc(cfg(feature = "strobe")))] +pub use crate::hasher_impl::strobe::{Strobe256, Strobe512, StrobeHasher}; + +/// Default (cryptographically secure) Multihash implementation. +/// +/// This is a default set of hashing algorithms. Usually applications would use their own subset of +/// algorithms. See the [`multihash-derive`] crate for more information. 
+/// +/// [`multihash-derive`]: https://docs.rs/multihash-derive +#[cfg_attr(feature = "arb", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Copy, Clone, Debug, Eq, MultihashDigest, PartialEq)] +#[mh(alloc_size = 64)] +pub enum Code { + /// SHA-256 (32-byte hash size) + #[cfg(feature = "sha2")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha2")))] + #[mh(code = 0x12, hasher = crate::Sha2_256)] + Sha2_256, + /// SHA-512 (64-byte hash size) + #[cfg(feature = "sha2")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha2")))] + #[mh(code = 0x13, hasher = crate::Sha2_512)] + Sha2_512, + /// SHA3-224 (28-byte hash size) + #[cfg(feature = "sha3")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] + #[mh(code = 0x17, hasher = crate::Sha3_224)] + Sha3_224, + /// SHA3-256 (32-byte hash size) + #[cfg(feature = "sha3")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] + #[mh(code = 0x16, hasher = crate::Sha3_256)] + Sha3_256, + /// SHA3-384 (48-byte hash size) + #[cfg(feature = "sha3")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] + #[mh(code = 0x15, hasher = crate::Sha3_384)] + Sha3_384, + /// SHA3-512 (64-byte hash size) + #[cfg(feature = "sha3")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] + #[mh(code = 0x14, hasher = crate::Sha3_512)] + Sha3_512, + /// Keccak-224 (28-byte hash size) + #[cfg(feature = "sha3")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] + #[mh(code = 0x1a, hasher = crate::Keccak224)] + Keccak224, + /// Keccak-256 (32-byte hash size) + #[cfg(feature = "sha3")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] + #[mh(code = 0x1b, hasher = crate::Keccak256)] + Keccak256, + /// Keccak-384 (48-byte hash size) + #[cfg(feature = "sha3")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] + #[mh(code = 0x1c, hasher = crate::Keccak384)] + Keccak384, + /// Keccak-512 (64-byte hash size) + #[cfg(feature = "sha3")] + #[cfg_attr(docsrs, doc(cfg(feature = "sha3")))] + #[mh(code = 0x1d, hasher = crate::Keccak512)] + Keccak512, + /// BLAKE2b-256 (32-byte hash size) + #[cfg(feature = "blake2b")] + #[cfg_attr(docsrs, doc(cfg(feature = "blake2b")))] + #[mh(code = 0xb220, hasher = crate::Blake2b256)] + Blake2b256, + /// BLAKE2b-512 (64-byte hash size) + #[cfg(feature = "blake2b")] + #[cfg_attr(docsrs, doc(cfg(feature = "blake2b")))] + #[mh(code = 0xb240, hasher = crate::Blake2b512)] + Blake2b512, + /// BLAKE2s-128 (16-byte hash size) + #[cfg(feature = "blake2s")] + #[cfg_attr(docsrs, doc(cfg(feature = "blake2s")))] + #[mh(code = 0xb250, hasher = crate::Blake2s128)] + Blake2s128, + /// BLAKE2s-256 (32-byte hash size) + #[cfg(feature = "blake2s")] + #[cfg_attr(docsrs, doc(cfg(feature = "blake2s")))] + #[mh(code = 0xb260, hasher = crate::Blake2s256)] + Blake2s256, + /// BLAKE3-256 (32-byte hash size) + #[cfg(feature = "blake3")] + #[cfg_attr(docsrs, doc(cfg(feature = "blake3")))] + #[mh(code = 0x1e, hasher = crate::Blake3_256)] + Blake3_256, + /// RIPEMD-160 (20-byte hash size) + #[cfg(feature = "ripemd")] + #[cfg_attr(docsrs, doc(cfg(feature = "ripemd")))] + #[mh(code = 0x1053, hasher = crate::Ripemd160)] + Ripemd160, + /// RIPEMD-256 (32-byte hash size) + #[cfg(feature = "ripemd")] + #[cfg_attr(docsrs, doc(cfg(feature = "ripemd")))] + #[mh(code = 0x1054, hasher = crate::Ripemd256)] + Ripemd256, + /// RIPEMD-320 (40-byte hash size) + #[cfg(feature = "ripemd")] + #[cfg_attr(docsrs, doc(cfg(feature = "ripemd")))] + #[mh(code = 0x1055, hasher = crate::Ripemd320)] + Ripemd320, +} + +#[cfg(all(test, any(feature = "sha2", feature = 
"sha3")))] +mod tests { + use super::*; + #[cfg(feature = "sha3")] + use crate::hasher_impl::sha3::{Sha3_256, Sha3_512}; + #[cfg(feature = "sha3")] + use multihash_derive::Hasher; + #[cfg(feature = "sha2")] + use multihash_derive::{Multihash, MultihashDigest}; + + #[test] + #[cfg(feature = "sha3")] + fn test_hasher_256() { + let mut hasher = Sha3_256::default(); + hasher.update(b"hello world"); + let digest = hasher.finalize(); + let hash = Code::Sha3_256.wrap(digest).unwrap(); + let hash2 = Code::Sha3_256.digest(b"hello world"); + assert_eq!(hash.code(), u64::from(Code::Sha3_256)); + assert_eq!(hash.size(), 32); + assert_eq!(hash.digest(), digest); + assert_eq!(hash, hash2); + } + + #[test] + #[cfg(feature = "sha3")] + fn test_hasher_512() { + let mut hasher = Sha3_512::default(); + hasher.update(b"hello world"); + let digest = hasher.finalize(); + let hash = Code::Sha3_512.wrap(digest).unwrap(); + let hash2 = Code::Sha3_512.digest(b"hello world"); + assert_eq!(hash.code(), u64::from(Code::Sha3_512)); + assert_eq!(hash.size(), 64); + assert_eq!(hash.digest(), digest); + assert_eq!(hash, hash2); + } + + #[test] + #[cfg(feature = "sha2")] + fn roundtrip() { + let hash = Code::Sha2_256.digest(b"hello world"); + let mut buf = [0u8; 35]; + let written = hash.write(&mut buf[..]).unwrap(); + let hash2 = Multihash::<32>::read(&buf[..]).unwrap(); + assert_eq!(hash, hash2); + assert_eq!(hash.encoded_len(), written); + } + + #[test] + #[cfg(feature = "sha2")] + fn test_truncate_down() { + let hash = Code::Sha2_256.digest(b"hello world"); + let small = hash.truncate(20); + assert_eq!(small.size(), 20); + } + + #[test] + #[cfg(feature = "sha2")] + fn test_truncate_up() { + let hash = Code::Sha2_256.digest(b"hello world"); + let small = hash.truncate(100); + assert_eq!(small.size(), 32); + } + + #[test] + #[cfg(feature = "sha2")] + fn test_resize_fits() { + let hash = Code::Sha2_256.digest(b"hello world"); + let _: Multihash<32> = hash.resize().unwrap(); + } + + #[test] + #[cfg(feature = "sha2")] + fn test_resize_up() { + let hash = Code::Sha2_256.digest(b"hello world"); + let _: Multihash<100> = hash.resize().unwrap(); + } + + #[test] + #[cfg(feature = "sha2")] + fn test_resize_truncate() { + let hash = Code::Sha2_256.digest(b"hello world"); + hash.resize::<20>().unwrap_err(); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/tests/lib.rs b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/tests/lib.rs new file mode 100644 index 000000000000..05ff27f723c9 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-codetable-0.1.4/tests/lib.rs @@ -0,0 +1,414 @@ +use std::io::{Cursor, Write}; + +use multihash_codetable::{ + Blake2b256, Blake2b512, Blake2s128, Blake2s256, Blake3_256, Keccak224, Keccak256, Keccak384, + Keccak512, Ripemd160, Ripemd256, Ripemd320, Sha1, Sha2_256, Sha2_512, Sha3_224, Sha3_256, + Sha3_384, Sha3_512, Strobe256, Strobe512, +}; +use multihash_derive::{Hasher, MultihashDigest}; + +#[derive(Clone, Copy, Debug, Eq, MultihashDigest, PartialEq)] +#[mh(alloc_size = 64)] +pub enum Code { + #[mh(code = 0x11, hasher = Sha1)] + Sha1, + #[mh(code = 0x12, hasher = Sha2_256)] + Sha2_256, + #[mh(code = 0x13, hasher = Sha2_512)] + Sha2_512, + #[mh(code = 0x17, hasher = Sha3_224)] + Sha3_224, + #[mh(code = 0x16, hasher = Sha3_256)] + Sha3_256, + #[mh(code = 0x15, hasher = Sha3_384)] + Sha3_384, + #[mh(code = 0x14, hasher = Sha3_512)] + Sha3_512, + #[mh(code = 0x1a, hasher = Keccak224)] + Keccak224, + #[mh(code = 0x1b, hasher 
= Keccak256)] + Keccak256, + #[mh(code = 0x1c, hasher = Keccak384)] + Keccak384, + #[mh(code = 0x1d, hasher = Keccak512)] + Keccak512, + #[mh(code = 0xb220, hasher = Blake2b256)] + Blake2b256, + #[mh(code = 0xb240, hasher = Blake2b512)] + Blake2b512, + #[mh(code = 0xb250, hasher = Blake2s128)] + Blake2s128, + #[mh(code = 0xb260, hasher = Blake2s256)] + Blake2s256, + #[mh(code = 0x1e, hasher = Blake3_256)] + Blake3_256, + #[mh(code = 0x3312e7, hasher = Strobe256)] + Strobe256, + #[mh(code = 0x3312e8, hasher = Strobe512)] + Strobe512, + #[cfg(feature = "ripemd")] + #[mh(code = 0x1053, hasher = Ripemd160)] + Ripemd160, + #[cfg(feature = "ripemd")] + #[mh(code = 0x1054, hasher = Ripemd256)] + Ripemd256, + #[cfg(feature = "ripemd")] + #[mh(code = 0x1055, hasher = Ripemd320)] + Ripemd320, +} + +macro_rules! assert_encode { + // Mutlihash enum member, Multihash code, input, Multihash as hex + {$( $alg:ty, $code:expr, $data:expr, $expect:expr; )*} => { + $( + let expected = hex::decode($expect).unwrap(); + + // From code + assert_eq!( + $code.digest($data).to_bytes(), + expected, + "{:?} encodes correctly (from code)", stringify!($alg) + ); + + // From incremental hashing + let mut hasher = <$alg>::default(); + hasher.update($data); + assert_eq!( + $code.wrap(hasher.finalize()).unwrap().to_bytes(), + expected, + "{:?} encodes correctly (from hasher)", stringify!($alg) + ); + )* + } +} + +#[allow(clippy::cognitive_complexity)] +#[test] +fn multihash_encode() { + assert_encode! { + Sha1, Code::Sha1, b"beep boop", "11147c8357577f51d4f0a8d393aa1aaafb28863d9421"; + Sha2_256, Code::Sha2_256, b"helloworld", "1220936a185caaa266bb9cbe981e9e05cb78cd732b0b3280eb944412bb6f8f8f07af"; + Sha2_256, Code::Sha2_256, b"beep boop", "122090ea688e275d580567325032492b597bc77221c62493e76330b85ddda191ef7c"; + Sha2_512, Code::Sha2_512, b"hello world", "1340309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"; + Sha3_224, Code::Sha3_224, b"hello world", "171Cdfb7f18c77e928bb56faeb2da27291bd790bc1045cde45f3210bb6c5"; + Sha3_256, Code::Sha3_256, b"hello world", "1620644bcc7e564373040999aac89e7622f3ca71fba1d972fd94a31c3bfbf24e3938"; + Sha3_384, Code::Sha3_384, b"hello world", "153083bff28dde1b1bf5810071c6643c08e5b05bdb836effd70b403ea8ea0a634dc4997eb1053aa3593f590f9c63630dd90b"; + Sha3_512, Code::Sha3_512, b"hello world", "1440840006653e9ac9e95117a15c915caab81662918e925de9e004f774ff82d7079a40d4d27b1b372657c61d46d470304c88c788b3a4527ad074d1dccbee5dbaa99a"; + Keccak224, Code::Keccak224, b"hello world", "1A1C25f3ecfebabe99686282f57f5c9e1f18244cfee2813d33f955aae568"; + Keccak256, Code::Keccak256, b"hello world", "1B2047173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"; + Keccak384, Code::Keccak384, b"hello world", "1C3065fc99339a2a40e99d3c40d695b22f278853ca0f925cde4254bcae5e22ece47e6441f91b6568425adc9d95b0072eb49f"; + Keccak512, Code::Keccak512, b"hello world", "1D403ee2b40047b8060f68c67242175660f4174d0af5c01d47168ec20ed619b0b7c42181f40aa1046f39e2ef9efc6910782a998e0013d172458957957fac9405b67d"; + Blake2b512, Code::Blake2b512, b"hello world", "c0e40240021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0"; + Blake2s256, Code::Blake2s256, b"hello world", "e0e402209aec6806794561107e594b1f6a8a6b0c92a0cba9acf5e5e93cca06f781813b0b"; + Blake2b256, Code::Blake2b256, b"hello world", "a0e40220256c83b297114d201b30179f3f0ef0cace9783622da5974326b436178aeef610"; + Blake2s128, 
Code::Blake2s128, b"hello world", "d0e4021037deae0226c30da2ab424a7b8ee14e83"; + Blake3_256, Code::Blake3_256, b"hello world", "1e20d74981efa70a0c880b8d8c1985d075dbcbf679b99a5f9914e5aaf96b831a9e24"; + } + + #[cfg(feature = "ripemd")] + assert_encode! { + Ripemd160, Code::Ripemd160, b"hello world", "d3201498c615784ccb5fe5936fbc0cbe9dfdb408d92f0f"; + Ripemd256, Code::Ripemd256, b"hello world", "d420200d375cf9d9ee95a3bb15f757c81e93bb0ad963edf69dc4d12264031814608e37"; + Ripemd320, Code::Ripemd320, b"hello world", "d520280e12fe7d075f8e319e07c106917eddb0135e9a10aefb50a8a07ccb0582ff1fa27b95ed5af57fd5c6"; + } +} + +macro_rules! assert_decode { + {$( $code:expr, $hash:expr; )*} => { + $( + let hash = hex::decode($hash).unwrap(); + assert_eq!( + Multihash::from_bytes(&hash).unwrap().code(), + u64::from($code), + "{:?} decodes correctly", stringify!($code) + ); + )* + } +} + +#[test] +fn assert_decode() { + assert_decode! { + Code::Sha1, "11147c8357577f51d4f0a8d393aa1aaafb28863d9421"; + Code::Sha2_256, "1220936a185caaa266bb9cbe981e9e05cb78cd732b0b3280eb944412bb6f8f8f07af"; + Code::Sha2_256, "122090ea688e275d580567325032492b597bc77221c62493e76330b85ddda191ef7c"; + Code::Sha2_512, "1340309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"; + Code::Sha3_224, "171Cdfb7f18c77e928bb56faeb2da27291bd790bc1045cde45f3210bb6c5"; + Code::Sha3_256, "1620644bcc7e564373040999aac89e7622f3ca71fba1d972fd94a31c3bfbf24e3938"; + Code::Sha3_384, "153083bff28dde1b1bf5810071c6643c08e5b05bdb836effd70b403ea8ea0a634dc4997eb1053aa3593f590f9c63630dd90b"; + Code::Sha3_512, "1440840006653e9ac9e95117a15c915caab81662918e925de9e004f774ff82d7079a40d4d27b1b372657c61d46d470304c88c788b3a4527ad074d1dccbee5dbaa99a"; + Code::Keccak224, "1A1C25f3ecfebabe99686282f57f5c9e1f18244cfee2813d33f955aae568"; + Code::Keccak256, "1B2047173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"; + Code::Keccak384, "1C3065fc99339a2a40e99d3c40d695b22f278853ca0f925cde4254bcae5e22ece47e6441f91b6568425adc9d95b0072eb49f"; + Code::Keccak512, "1D403ee2b40047b8060f68c67242175660f4174d0af5c01d47168ec20ed619b0b7c42181f40aa1046f39e2ef9efc6910782a998e0013d172458957957fac9405b67d"; + Code::Blake2b512, "c0e40240021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0"; + Code::Blake2s256, "e0e402209aec6806794561107e594b1f6a8a6b0c92a0cba9acf5e5e93cca06f781813b0b"; + Code::Blake2b256, "a0e40220256c83b297114d201b30179f3f0ef0cace9783622da5974326b436178aeef610"; + Code::Blake2s128, "d0e4021037deae0226c30da2ab424a7b8ee14e83"; + Code::Blake3_256, "1e20d74981efa70a0c880b8d8c1985d075dbcbf679b99a5f9914e5aaf96b831a9e24"; + } + #[cfg(feature = "ripemd")] + assert_decode! { + Code::Ripemd160, "d3201498c615784ccb5fe5936fbc0cbe9dfdb408d92f0f"; + Code::Ripemd256, "d420200d375cf9d9ee95a3bb15f757c81e93bb0ad963edf69dc4d12264031814608e37"; + Code::Ripemd320, "d520280e12fe7d075f8e319e07c106917eddb0135e9a10aefb50a8a07ccb0582ff1fa27b95ed5af57fd5c6"; + } +} + +macro_rules! 
assert_roundtrip { + ($( $code:expr, $alg:ident; )*) => { + $( + // Hashing with one call + { + let hash = $code.digest(b"helloworld"); + assert_eq!( + Multihash::from_bytes(&hash.to_bytes()).unwrap().code(), + hash.code() + ); + } + // Hashing incrementally + { + let mut hasher = <$alg>::default(); + hasher.update(b"helloworld"); + let hash = $code.wrap(hasher.finalize()).unwrap(); + assert_eq!( + Multihash::from_bytes(&hash.to_bytes()).unwrap().code(), + hash.code() + ); + } + // Hashing as `Write` implementation + { + let mut hasher = <$alg>::default(); + hasher.write_all(b"helloworld").unwrap(); + let hash = $code.wrap(hasher.finalize()).unwrap(); + assert_eq!( + Multihash::from_bytes(&hash.to_bytes()).unwrap().code(), + hash.code() + ); + } + )* + } +} + +#[allow(clippy::cognitive_complexity)] +#[test] +fn assert_roundtrip() { + assert_roundtrip!( + Code::Sha1, Sha1; + Code::Sha2_256, Sha2_256; + Code::Sha2_512, Sha2_512; + Code::Sha3_224, Sha3_224; + Code::Sha3_256, Sha3_256; + Code::Sha3_384, Sha3_384; + Code::Sha3_512, Sha3_512; + Code::Keccak224, Keccak224; + Code::Keccak256, Keccak256; + Code::Keccak384, Keccak384; + Code::Keccak512, Keccak512; + Code::Blake2b512, Blake2b512; + Code::Blake2s256, Blake2s256; + Code::Blake3_256, Blake3_256; + ); + + #[cfg(feature = "ripemd")] + assert_roundtrip! { + Code::Ripemd160, Ripemd160; + Code::Ripemd256, Ripemd256; + Code::Ripemd320, Ripemd320; + } +} + +/// Testing the public interface of `Multihash` and coversions to it +fn multihash_methods(code: Code, prefix: &str, digest_str: &str) +where + H: Hasher + Default, +{ + let digest = hex::decode(digest_str).unwrap(); + let expected_bytes = hex::decode(format!("{prefix}{digest_str}")).unwrap(); + let mut expected_cursor = Cursor::new(&expected_bytes); + let multihash = code.digest(b"hello world"); + + assert_eq!(Multihash::wrap(code.into(), &digest).unwrap(), multihash); + assert_eq!(multihash.code(), u64::from(code)); + assert_eq!(multihash.size() as usize, digest.len()); + assert_eq!(multihash.digest(), digest); + assert_eq!(Multihash::read(&mut expected_cursor).unwrap(), multihash); + assert_eq!(Multihash::from_bytes(&expected_bytes).unwrap(), multihash); + let mut written_buf = Vec::new(); + multihash.write(&mut written_buf).unwrap(); + assert_eq!(written_buf, expected_bytes); + assert_eq!(multihash.to_bytes(), expected_bytes); + + // Test from hasher digest conversion + let mut hasher = H::default(); + hasher.update(b"hello world"); + let multihash_from_digest = code.wrap(hasher.finalize()).unwrap(); + assert_eq!(multihash_from_digest.code(), u64::from(code)); + assert_eq!(multihash_from_digest.size() as usize, digest.len()); + assert_eq!(multihash_from_digest.digest(), digest); +} + +#[test] +fn test_multihash_methods() { + multihash_methods::( + Code::Sha1, + "1114", + "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed", + ); + multihash_methods::( + Code::Sha2_256, + "1220", + "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9", + ); + multihash_methods::( + Code::Sha2_512, + "1340", + "309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"); + multihash_methods::( + Code::Sha3_224, + "171C", + "dfb7f18c77e928bb56faeb2da27291bd790bc1045cde45f3210bb6c5", + ); + multihash_methods::( + Code::Sha3_256, + "1620", + "644bcc7e564373040999aac89e7622f3ca71fba1d972fd94a31c3bfbf24e3938", + ); + multihash_methods::( + Code::Sha3_384, + "1530", + 
"83bff28dde1b1bf5810071c6643c08e5b05bdb836effd70b403ea8ea0a634dc4997eb1053aa3593f590f9c63630dd90b"); + multihash_methods::( + Code::Sha3_512, + "1440", + "840006653e9ac9e95117a15c915caab81662918e925de9e004f774ff82d7079a40d4d27b1b372657c61d46d470304c88c788b3a4527ad074d1dccbee5dbaa99a"); + multihash_methods::( + Code::Keccak224, + "1A1C", + "25f3ecfebabe99686282f57f5c9e1f18244cfee2813d33f955aae568", + ); + multihash_methods::( + Code::Keccak256, + "1B20", + "47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad", + ); + multihash_methods::( + Code::Keccak384, + "1C30", + "65fc99339a2a40e99d3c40d695b22f278853ca0f925cde4254bcae5e22ece47e6441f91b6568425adc9d95b0072eb49f"); + multihash_methods::( + Code::Keccak512, + "1D40", + "3ee2b40047b8060f68c67242175660f4174d0af5c01d47168ec20ed619b0b7c42181f40aa1046f39e2ef9efc6910782a998e0013d172458957957fac9405b67d"); + multihash_methods::( + Code::Blake2b512, + "c0e40240", + "021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0"); + multihash_methods::( + Code::Blake2s256, + "e0e40220", + "9aec6806794561107e594b1f6a8a6b0c92a0cba9acf5e5e93cca06f781813b0b", + ); + multihash_methods::( + Code::Blake2b256, + "a0e40220", + "256c83b297114d201b30179f3f0ef0cace9783622da5974326b436178aeef610", + ); + multihash_methods::( + Code::Blake2s128, + "d0e40210", + "37deae0226c30da2ab424a7b8ee14e83", + ); + multihash_methods::( + Code::Blake3_256, + "1e20", + "d74981efa70a0c880b8d8c1985d075dbcbf679b99a5f9914e5aaf96b831a9e24", + ); + #[cfg(feature = "ripemd")] + { + multihash_methods::( + Code::Ripemd160, + "d32014", + "98c615784ccb5fe5936fbc0cbe9dfdb408d92f0f", + ); + multihash_methods::( + Code::Ripemd256, + "d42020", + "0d375cf9d9ee95a3bb15f757c81e93bb0ad963edf69dc4d12264031814608e37", + ); + multihash_methods::( + Code::Ripemd320, + "d52028", + "0e12fe7d075f8e319e07c106917eddb0135e9a10aefb50a8a07ccb0582ff1fa27b95ed5af57fd5c6", + ); + } +} + +#[test] +fn multihash_errors() { + assert!( + Multihash::from_bytes(&[]).is_err(), + "Should error on empty data" + ); + assert!( + Multihash::from_bytes(&[1, 2, 3]).is_err(), + "Should error on invalid multihash" + ); + assert!( + Multihash::from_bytes(&[1, 2, 3]).is_err(), + "Should error on invalid prefix" + ); + assert!( + Multihash::from_bytes(&[0x12, 0x20, 0xff]).is_err(), + "Should error on correct prefix with wrong digest" + ); + let identity_code: u8 = 0x00; + let identity_length = 3; + assert!( + Multihash::from_bytes(&[identity_code, identity_length, 1, 2, 3, 4]).is_err(), + "Should error on wrong hash length" + ); +} + +#[test] +fn blak3_non_default_digest() { + use multihash_codetable::Blake3Hasher; + use multihash_derive::MultihashDigest; + const DIGEST_SIZE: usize = 16; + pub struct ContentHasher(Blake3Hasher); + + pub struct ContentHash([u8; DIGEST_SIZE]); + + impl ContentHasher { + fn new() -> ContentHasher { + ContentHasher(Blake3Hasher::default()) + } + + fn write(&mut self, input: &[u8]) { + self.0.update(input); + } + + fn finish(&mut self) -> ContentHash { + let hash = multihash_codetable::Code::Blake3_256 + .wrap(self.0.finalize()) + .unwrap(); + let resized_hash = hash.resize::().unwrap(); + + let mut content = ContentHash([0u8; DIGEST_SIZE]); + content.0.copy_from_slice(resized_hash.digest()); + content + } + + fn reset(&mut self) { + self.0.reset(); + } + } + + let mut hasher = ContentHasher::new(); + hasher.write("foobar".as_bytes()); + let content_hash = hasher.finish(); + hasher.reset(); + + let expected = 
hex::decode("aa51dcd43d5c6c5203ee16906fd6b35d").unwrap(); + assert_eq!(&content_hash.0, expected.as_slice()) +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/.cargo_vcs_info.json deleted file mode 100644 index 1d8476b3ebf8..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "34bf38df510edb2266913e52b97cbe46c8168667" - }, - "path_in_vcs": "derive" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/Cargo.toml deleted file mode 100644 index 559831d9712f..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "multihash-derive" -version = "0.8.1" -authors = ["David Craven "] -description = "Proc macro for deriving custom multihash tables." -license = "MIT" -repository = "https://github.com/multiformats/multihash" -resolver = "2" - -[lib] -proc-macro = true - -[dependencies.proc-macro-crate] -version = "~1.1.0" - -[dependencies.proc-macro-error] -version = "1.0.4" - -[dependencies.proc-macro2] -version = "1.0.24" -features = ["span-locations"] - -[dependencies.quote] -version = "1.0.7" - -[dependencies.syn] -version = "1.0.42" - -[dependencies.synstructure] -version = "0.12.4" - -[dev-dependencies.pretty_assertions] -version = "1.0.0" - -[features] -default = ["std"] -std = [] diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/Cargo.toml.orig deleted file mode 100644 index 4f3aa927936e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/Cargo.toml.orig +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "multihash-derive" -version = "0.8.1" -authors = ["David Craven "] -edition = "2018" -description = "Proc macro for deriving custom multihash tables." -license = "MIT" -repository = "https://github.com/multiformats/multihash" -resolver = "2" - -[lib] -proc-macro = true - -[dependencies] -proc-macro2 = { version = "1.0.24", features = ["span-locations"] } -proc-macro-crate = "~1.1.0" -proc-macro-error = "1.0.4" -quote = "1.0.7" -syn = "1.0.42" -synstructure = "0.12.4" - -[features] -default = ["std"] -std = [] - -[dev-dependencies] -pretty_assertions = "1.0.0" -multihash = { path = "..", default-features = false, features = ["derive", "sha2"] } diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/lib.rs deleted file mode 100644 index a9c633ecfa26..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! 
This proc macro derives a custom Multihash code table from a list of hashers. It also -//! generates a public type called `Multihash` which corresponds to the specified `alloc_size`. -//! -//! The digests are stack allocated with a fixed size. That size needs to be big enough to hold any -//! of the specified hash digests. This cannot be determined reliably on compile-time, hence it -//! needs to set manually via the `alloc_size` attribute. Also you might want to set it to bigger -//! sizes then necessarily needed for backwards/forward compatibility. -//! -//! If you set `#mh(alloc_size = …)` to a too low value, you will get compiler errors. Please note -//! the the sizes are checked only on a syntactic level and *not* on the type level. This means -//! that digest need to have a size const generic, which is a valid `usize`, for example `32` or -//! `64`. -//! -//! You can disable those compiler errors with setting the `no_alloc_size_errors` attribute. This -//! can be useful if you e.g. have specified type aliases for your hash digests and you are sure -//! you use the correct value for `alloc_size`. -//! -//! # Example -//! -//! ``` -//! use multihash::derive::Multihash; -//! use multihash::MultihashDigest; -//! -//! #[derive(Clone, Copy, Debug, Eq, Multihash, PartialEq)] -//! #[mh(alloc_size = 64)] -//! pub enum Code { -//! #[mh(code = 0x01, hasher = multihash::Sha2_256)] -//! Foo, -//! #[mh(code = 0x02, hasher = multihash::Sha2_512)] -//! Bar, -//! } -//! -//! let hash = Code::Foo.digest(b"hello world!"); -//! println!("{:02x?}", hash); -//! ``` -extern crate proc_macro; - -mod multihash; -mod utils; - -use proc_macro::TokenStream; -use proc_macro_error::proc_macro_error; -use synstructure::{decl_derive, Structure}; - -decl_derive!([Multihash, attributes(mh)] => #[proc_macro_error] multihash); -fn multihash(s: Structure) -> TokenStream { - multihash::multihash(s).into() -} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/multihash.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/multihash.rs deleted file mode 100644 index 4d06e775a0a6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/multihash.rs +++ /dev/null @@ -1,372 +0,0 @@ -use std::collections::HashSet; - -use crate::utils; -use proc_macro2::{Span, TokenStream}; -use quote::quote; -#[cfg(not(test))] -use quote::ToTokens; -use syn::parse::{Parse, ParseStream}; -#[cfg(not(test))] -use syn::spanned::Spanned; -use synstructure::{Structure, VariantInfo}; - -mod kw { - use syn::custom_keyword; - - custom_keyword!(code); - custom_keyword!(hasher); - custom_keyword!(mh); - custom_keyword!(alloc_size); -} - -/// Attributes for the enum items. -#[derive(Debug)] -#[allow(clippy::large_enum_variant)] -enum MhAttr { - Code(utils::Attr), - Hasher(utils::Attr>), -} - -impl Parse for MhAttr { - fn parse(input: ParseStream) -> syn::Result { - if input.peek(kw::code) { - Ok(MhAttr::Code(input.parse()?)) - } else if input.peek(kw::hasher) { - Ok(MhAttr::Hasher(input.parse()?)) - } else { - Err(syn::Error::new(input.span(), "unknown attribute")) - } - } -} - -/// Attributes of the top-level derive. 
-#[derive(Debug)] -enum DeriveAttr { - AllocSize(utils::Attr), -} - -impl Parse for DeriveAttr { - fn parse(input: ParseStream) -> syn::Result { - if input.peek(kw::alloc_size) { - Ok(Self::AllocSize(input.parse()?)) - } else { - Err(syn::Error::new(input.span(), "unknown attribute")) - } - } -} - -struct Params { - code_enum: syn::Ident, -} - -#[derive(Debug)] -struct Hash { - ident: syn::Ident, - code: syn::Expr, - hasher: Box, -} - -impl Hash { - fn code_into_u64(&self, params: &Params) -> TokenStream { - let ident = &self.ident; - let code_enum = ¶ms.code_enum; - let code = &self.code; - quote!(#code_enum::#ident => #code) - } - - fn code_from_u64(&self) -> TokenStream { - let ident = &self.ident; - let code = &self.code; - quote!(#code => Ok(Self::#ident)) - } - - fn code_digest(&self) -> TokenStream { - let ident = &self.ident; - let hasher = &self.hasher; - let code = &self.code; - quote!(Self::#ident => { - let mut hasher = #hasher::default(); - hasher.update(input); - Multihash::wrap(#code, hasher.finalize()).unwrap() - }) - } -} - -impl<'a> From<&'a VariantInfo<'a>> for Hash { - fn from(bi: &'a VariantInfo<'a>) -> Self { - let mut code = None; - let mut hasher = None; - for attr in bi.ast().attrs { - let attr: Result, _> = syn::parse2(attr.tokens.clone()); - if let Ok(attr) = attr { - for attr in attr.attrs { - match attr { - MhAttr::Code(attr) => code = Some(attr.value), - MhAttr::Hasher(attr) => hasher = Some(attr.value), - } - } - } - } - - let ident = bi.ast().ident.clone(); - let code = code.unwrap_or_else(|| { - let msg = "Missing code attribute: e.g. #[mh(code = multihash::SHA3_256)]"; - #[cfg(test)] - panic!("{}", msg); - #[cfg(not(test))] - proc_macro_error::abort!(ident, msg); - }); - let hasher = hasher.unwrap_or_else(|| { - let msg = "Missing hasher attribute: e.g. #[mh(hasher = multihash::Sha2_256)]"; - #[cfg(test)] - panic!("{}", msg); - #[cfg(not(test))] - proc_macro_error::abort!(ident, msg); - }); - Self { - ident, - code, - hasher, - } - } -} - -/// Parse top-level enum [#mh()] attributes. -/// -/// Returns the `alloc_size` and whether errors regarding to `alloc_size` should be reported or not. -fn parse_code_enum_attrs(ast: &syn::DeriveInput) -> syn::LitInt { - let mut alloc_size = None; - - for attr in &ast.attrs { - let derive_attrs: Result, _> = syn::parse2(attr.tokens.clone()); - if let Ok(derive_attrs) = derive_attrs { - for derive_attr in derive_attrs.attrs { - match derive_attr { - DeriveAttr::AllocSize(alloc_size_attr) => { - alloc_size = Some(alloc_size_attr.value) - } - } - } - } - } - match alloc_size { - Some(alloc_size) => alloc_size, - None => { - let msg = "enum is missing `alloc_size` attribute: e.g. #[mh(alloc_size = 64)]"; - #[cfg(test)] - panic!("{}", msg); - #[cfg(not(test))] - proc_macro_error::abort!(&ast.ident, msg); - } - } -} - -/// Return an error if the same code is used several times. -/// -/// This only checks for string equality, though this should still catch most errors caused by -/// copy and pasting. 
-fn error_code_duplicates(hashes: &[Hash]) { - // Use a temporary store to determine whether a certain value is unique or not - let mut uniq = HashSet::new(); - - hashes.iter().for_each(|hash| { - let code = &hash.code; - let msg = format!( - "the #mh(code) attribute `{}` is defined multiple times", - quote!(#code) - ); - - // It's a duplicate - if !uniq.insert(code) { - #[cfg(test)] - panic!("{}", msg); - #[cfg(not(test))] - { - let already_defined = uniq.get(code).unwrap(); - let line = already_defined.to_token_stream().span().start().line; - proc_macro_error::emit_error!( - &hash.code, msg; - note = "previous definition of `{}` at line {}", quote!(#code), line; - ); - } - } - }); -} - -/// An error that contains a span in order to produce nice error messages. -#[derive(Debug)] -#[allow(dead_code)] -struct ParseError(Span); - -pub fn multihash(s: Structure) -> TokenStream { - let mh_crate = match utils::use_crate("multihash") { - Ok(ident) => ident, - Err(e) => { - let err = syn::Error::new(Span::call_site(), e).to_compile_error(); - return quote!(#err); - } - }; - let code_enum = &s.ast().ident; - let alloc_size = parse_code_enum_attrs(s.ast()); - let hashes: Vec<_> = s.variants().iter().map(Hash::from).collect(); - - error_code_duplicates(&hashes); - - let params = Params { - code_enum: code_enum.clone(), - }; - - let code_into_u64 = hashes.iter().map(|h| h.code_into_u64(¶ms)); - let code_from_u64 = hashes.iter().map(|h| h.code_from_u64()); - let code_digest = hashes.iter().map(|h| h.code_digest()); - - quote! { - /// A Multihash with the same allocated size as the Multihashes produces by this derive. - pub type Multihash = #mh_crate::MultihashGeneric<#alloc_size>; - - impl #mh_crate::MultihashDigest<#alloc_size> for #code_enum { - fn digest(&self, input: &[u8]) -> Multihash { - use #mh_crate::Hasher; - match self { - #(#code_digest,)* - _ => unreachable!(), - } - } - - fn wrap(&self, digest: &[u8]) -> Result { - Multihash::wrap((*self).into(), digest) - } - } - - impl From<#code_enum> for u64 { - fn from(code: #code_enum) -> Self { - match code { - #(#code_into_u64,)* - _ => unreachable!(), - } - } - } - - impl core::convert::TryFrom for #code_enum { - type Error = #mh_crate::Error; - - fn try_from(code: u64) -> Result { - match code { - #(#code_from_u64,)* - _ => Err(#mh_crate::Error::UnsupportedCode(code)) - } - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_multihash_derive() { - let input = quote! { - #[derive(Clone, Multihash)] - #[mh(alloc_size = 32)] - pub enum Code { - #[mh(code = multihash::IDENTITY, hasher = multihash::Identity256)] - Identity256, - /// Multihash array for hash function. - #[mh(code = 0x38b64f, hasher = multihash::Strobe256)] - Strobe256, - } - }; - let expected = quote! { - /// A Multihash with the same allocated size as the Multihashes produces by this derive. 
- pub type Multihash = multihash::MultihashGeneric<32>; - - impl multihash::MultihashDigest<32> for Code { - fn digest(&self, input: &[u8]) -> Multihash { - use multihash::Hasher; - match self { - Self::Identity256 => { - let mut hasher = multihash::Identity256::default(); - hasher.update(input); - Multihash::wrap(multihash::IDENTITY, hasher.finalize()).unwrap() - }, - Self::Strobe256 => { - let mut hasher = multihash::Strobe256::default(); - hasher.update(input); - Multihash::wrap(0x38b64f, hasher.finalize()).unwrap() - }, - _ => unreachable!(), - } - } - - fn wrap(&self, digest: &[u8]) -> Result { - Multihash::wrap((*self).into(), digest) - } - } - - impl From for u64 { - fn from(code: Code) -> Self { - match code { - Code::Identity256 => multihash::IDENTITY, - Code::Strobe256 => 0x38b64f, - _ => unreachable!(), - } - } - } - - impl core::convert::TryFrom for Code { - type Error = multihash::Error; - - fn try_from(code: u64) -> Result { - match code { - multihash::IDENTITY => Ok(Self::Identity256), - 0x38b64f => Ok(Self::Strobe256), - _ => Err(multihash::Error::UnsupportedCode(code)) - } - } - } - }; - let derive_input = syn::parse2(input).unwrap(); - let s = Structure::new(&derive_input); - let result = multihash(s); - utils::assert_proc_macro(result, expected); - } - - #[test] - #[should_panic( - expected = "the #mh(code) attribute `multihash :: SHA2_256` is defined multiple times" - )] - fn test_multihash_error_code_duplicates() { - let input = quote! { - #[derive(Clone, Multihash)] - #[mh(alloc_size = 64)] - pub enum Multihash { - #[mh(code = multihash::SHA2_256, hasher = multihash::Sha2_256)] - Identity256, - #[mh(code = multihash::SHA2_256, hasher = multihash::Sha2_256)] - Identity256, - } - }; - let derive_input = syn::parse2(input).unwrap(); - let s = Structure::new(&derive_input); - multihash(s); - } - - #[test] - #[should_panic(expected = "the #mh(code) attribute `0x14` is defined multiple times")] - fn test_multihash_error_code_duplicates_numbers() { - let input = quote! 
{ - #[derive(Clone, Multihash)] - #[mh(alloc_size = 32)] - pub enum Code { - #[mh(code = 0x14, hasher = multihash::Sha2_256)] - Identity256, - #[mh(code = 0x14, hasher = multihash::Sha2_256)] - Identity256, - } - }; - let derive_input = syn::parse2(input).unwrap(); - let s = Structure::new(&derive_input); - multihash(s); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/utils.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/utils.rs deleted file mode 100644 index b21edb8ae309..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.8.1/src/utils.rs +++ /dev/null @@ -1,55 +0,0 @@ -use proc_macro2::Span; -use proc_macro_crate::{crate_name, FoundCrate}; -use syn::parse::{Parse, ParseStream}; -use syn::punctuated::Punctuated; -use syn::Error; - -pub fn use_crate(name: &str) -> Result { - match crate_name(name) { - Ok(FoundCrate::Name(krate)) => Ok(syn::Ident::new(&krate, Span::call_site())), - Ok(FoundCrate::Itself) => Ok(syn::Ident::new("crate", Span::call_site())), - Err(err) => Err(Error::new(Span::call_site(), err)), - } -} - -#[derive(Debug)] -pub struct Attrs { - pub paren: syn::token::Paren, - pub attrs: Punctuated, -} - -impl Parse for Attrs { - fn parse(input: ParseStream) -> syn::Result { - let content; - let paren = syn::parenthesized!(content in input); - let attrs = content.parse_terminated(A::parse)?; - Ok(Self { paren, attrs }) - } -} - -#[derive(Debug)] -pub struct Attr { - pub key: K, - pub eq: syn::token::Eq, - pub value: V, -} - -impl Parse for Attr { - fn parse(input: ParseStream) -> syn::Result { - Ok(Self { - key: input.parse()?, - eq: input.parse()?, - value: input.parse()?, - }) - } -} - -#[cfg(test)] -pub(crate) fn assert_proc_macro( - result: proc_macro2::TokenStream, - expected: proc_macro2::TokenStream, -) { - let result = result.to_string(); - let expected = expected.to_string(); - pretty_assertions::assert_eq!(result, expected); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..a4761ebd96a8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "4c0ef5268355308d7f083482dad1c81318db4f6b" + }, + "path_in_vcs": "derive" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/Cargo.toml new file mode 100644 index 000000000000..c460424e85d8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/Cargo.toml @@ -0,0 +1,54 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "multihash-derive" +version = "0.9.1" +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Proc macro for deriving custom multihash tables." +readme = false +license = "MIT" +repository = "https://github.com/multiformats/rust-multihash" +resolver = "2" + +[lib] +name = "multihash_derive" +path = "src/lib.rs" + +[[test]] +name = "multihash" +path = "tests/multihash.rs" + +[dependencies.core2] +version = "0.4.0" +default-features = false + +[dependencies.multihash] +version = "0.19.2" +default-features = false + +[dependencies.multihash-derive-impl] +version = "0.1.2" + +[dev-dependencies.trybuild] +version = "1.0.80" + +[features] +default = ["std"] +std = [ + "multihash/std", + "core2/std", +] diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/Cargo.toml.orig new file mode 100644 index 000000000000..1f0c13573ae8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/Cargo.toml.orig @@ -0,0 +1,20 @@ +[package] +name = "multihash-derive" +version = "0.9.1" +edition = "2018" +description = "Proc macro for deriving custom multihash tables." +license = "MIT" +repository = "https://github.com/multiformats/rust-multihash" + +[features] +default = ["std"] +std = ["multihash/std", "core2/std"] + +[dependencies] +multihash-derive-impl = { version = "0.1.2", path = "../derive-impl" } +multihash = { version = "0.19.2", path = "../", default-features = false } +core2 = { version = "0.4.0", default-features = false } + +[dev-dependencies] +trybuild = "1.0.80" +multihash-codetable = { path = "../codetable", features = ["strobe"] } diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/LICENSE b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/LICENSE new file mode 100644 index 000000000000..233fd7bd7d5a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (C) 2015-2016 Friedel Ziegelmayer + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
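The 0.9.1 packaging above splits the user-facing `multihash-derive` crate from the proc-macro implementation (`multihash-derive-impl`) and re-exports the core `multihash` types, so a project-specific codetable only needs this one dependency. A small sketch of that "bring your own table" path (not taken from the vendored sources; `Blake2b256` is borrowed from `multihash-codetable` purely for illustration, and any type implementing the re-exported `Hasher` trait would do):

// Sketch only: a project-specific codetable built on multihash-derive.
// Blake2b256 is assumed to come from multihash-codetable (feature "blake2b").
use multihash_derive::MultihashDigest;

#[derive(Clone, Copy, Debug, Eq, PartialEq, MultihashDigest)]
#[mh(alloc_size = 64)]
pub enum Code {
    /// BLAKE2b-256, multicodec code 0xb220.
    #[mh(code = 0xb220, hasher = multihash_codetable::Blake2b256)]
    Blake2b256,
}

fn main() {
    let hash = Code::Blake2b256.digest(b"hello world");
    assert_eq!(hash.code(), 0xb220);
    assert_eq!(hash.size(), 32);
}

Compared with the deleted 0.8.1 derive, the `#[mh(code = …, hasher = …)]` and `alloc_size` attribute grammar is unchanged; what moves is where the hashers and the `Multihash` type come from.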
+Status API Training Shop Blog About Pricing diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/hasher.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/src/hasher.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/multihash-0.18.1/src/hasher.rs rename to third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/src/hasher.rs diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/src/lib.rs new file mode 100644 index 000000000000..1a3a9fd8dc27 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/src/lib.rs @@ -0,0 +1,102 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +//! A procedural macro for custom Multihash code tables. +//! +//! This proc macro derives a custom Multihash code table from a list of hashers. It also +//! generates a public type called `Multihash` which corresponds to the specified `alloc_size`. +//! +//! The digests are stack allocated with a fixed size. That size needs to be big enough to hold any +//! of the specified hash digests. This cannot be determined reliably on compile-time, hence it +//! needs to set manually via the `alloc_size` attribute. Also you might want to set it to bigger +//! sizes then necessarily needed for backwards/forward compatibility. +//! +//! If you set `#mh(alloc_size = …)` to a too low value, you will get compiler errors. Please note +//! the the sizes are checked only on a syntactic level and *not* on the type level. This means +//! that digest need to have a size const generic, which is a valid `usize`, for example `32` or +//! `64`. +//! +//! You can disable those compiler errors with setting the `no_alloc_size_errors` attribute. This +//! can be useful if you e.g. have specified type aliases for your hash digests and you are sure +//! you use the correct value for `alloc_size`. +//! +//! When you want to define your own codetable, you should only depend on `multihash-derive`. +//! It re-exports the `multihash` crate for you. +//! +//! # Example +//! +//! ```ignore : `proc-macro-crate` does not work in docs, see https://github.com/bkchr/proc-macro-crate/issues/14 +//! use multihash_derive::{Hasher, MultihashDigest}; +//! +//! struct FooHasher; +//! +//! impl Hasher for FooHasher { +//! // Implement hasher ... +//! # fn update(&mut self, input: &[u8]) { +//! # +//! # } +//! # +//! # fn finalize(&mut self) -> &[u8] { +//! # &[] +//! # } +//! # +//! # fn reset(&mut self) { +//! # +//! # } +//! } +//! +//! #[derive(Clone, Copy, Debug, Eq, MultihashDigest, PartialEq)] +//! #[mh(alloc_size = 64)] +//! pub enum Code { +//! #[mh(code = 0x01, hasher = FooHasher)] +//! Foo +//! } +//! +//! let hash = Code::Foo.digest(b"hello world!"); +//! +//! println!("{:02x?}", hash); +//! ``` + +mod hasher; + +use core::convert::TryFrom; +use core::fmt; + +pub use hasher::Hasher; +pub use multihash::Error; +pub use multihash::Multihash; +#[doc(inline)] +pub use multihash_derive_impl::Multihash; // This one is deprecated. +pub use multihash_derive_impl::MultihashDigest; + +/// The given code is not supported by this codetable. 
+#[derive(Debug)] +pub struct UnsupportedCode(pub u64); + +impl fmt::Display for UnsupportedCode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "the code {} is not supported by this codetable", self.0) + } +} + +impl core2::error::Error for UnsupportedCode {} + +/// Trait that implements hashing. +/// +/// Typically, you won't implement this yourself but use the [`MultihashDigest`](multihash_derive_impl::MultihashDigest) custom-derive. +pub trait MultihashDigest: + TryFrom + + Into + + Send + + Sync + + Unpin + + Copy + + Eq + + fmt::Debug + + 'static +{ + /// Calculate the hash of some input data. + fn digest(&self, input: &[u8]) -> Multihash; + + /// Create a multihash from an existing multihash digest. + fn wrap(&self, digest: &[u8]) -> Result, Error>; +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_code_twice.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_code_twice.rs new file mode 100644 index 000000000000..866ecaa71b11 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_code_twice.rs @@ -0,0 +1,27 @@ +#[derive(Default)] +struct FooHasher { + +} + +impl multihash_derive::Hasher for FooHasher { + fn update(&mut self, input: &[u8]) { } + + fn finalize(&mut self) -> &[u8] { + todo!() + } + + fn reset(&mut self) { } +} + +#[derive(Clone, Debug, Eq, PartialEq, Copy, multihash_derive::MultihashDigest)] +#[mh(alloc_size = 32)] +pub enum Code { + #[mh(code = 0x0, hasher = FooHasher)] + Foo1, + #[mh(code = 0x0, hasher = FooHasher)] + Foo2, +} + +fn main() { + +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_code_twice.stderr b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_code_twice.stderr new file mode 100644 index 000000000000..8caa6d88dec1 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_code_twice.stderr @@ -0,0 +1,13 @@ +error: the #mh(code) attribute `0x0` is defined multiple times, previous definition at line 0 + --> tests/fail/no_allow_same_code_twice.rs:21:17 + | +21 | #[mh(code = 0x0, hasher = FooHasher)] + | ^^^ + +warning: unused variable: `input` + --> tests/fail/no_allow_same_code_twice.rs:7:26 + | +7 | fn update(&mut self, input: &[u8]) { } + | ^^^^^ help: if this is intentional, prefix it with an underscore: `_input` + | + = note: `#[warn(unused_variables)]` on by default diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_name_twice.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_name_twice.rs new file mode 100644 index 000000000000..73a89858cc18 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_name_twice.rs @@ -0,0 +1,27 @@ +#[derive(Default)] +struct FooHasher { + +} + +impl multihash_derive::Hasher for FooHasher { + fn update(&mut self, input: &[u8]) { } + + fn finalize(&mut self) -> &[u8] { + todo!() + } + + fn reset(&mut self) { } +} + +#[derive(Clone, Debug, Eq, PartialEq, Copy, multihash_derive::MultihashDigest)] +#[mh(alloc_size = 32)] +pub enum Code { + #[mh(code = 0x0, hasher = FooHasher)] + Foo, + #[mh(code = 0x1, hasher = FooHasher)] + Foo, +} + +fn main() { + +} diff --git 
a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_name_twice.stderr b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_name_twice.stderr new file mode 100644 index 000000000000..125a4424d1a5 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/fail/no_allow_same_name_twice.stderr @@ -0,0 +1,50 @@ +error[E0428]: the name `Foo` is defined multiple times + --> tests/fail/no_allow_same_name_twice.rs:22:5 + | +20 | Foo, + | --- previous definition of the type `Foo` here +21 | #[mh(code = 0x1, hasher = FooHasher)] +22 | Foo, + | ^^^ `Foo` redefined here + | + = note: `Foo` must be defined only once in the type namespace of this enum + +warning: unused variable: `input` + --> tests/fail/no_allow_same_name_twice.rs:7:26 + | +7 | fn update(&mut self, input: &[u8]) { } + | ^^^^^ help: if this is intentional, prefix it with an underscore: `_input` + | + = note: `#[warn(unused_variables)]` on by default + +error[E0004]: non-exhaustive patterns: `&Code::Foo` not covered + --> tests/fail/no_allow_same_name_twice.rs:16:17 + | +16 | #[derive(Clone, Debug, Eq, PartialEq, Copy, multihash_derive::MultihashDigest)] + | ^^^^^ pattern `&Code::Foo` not covered + | +note: `Code` defined here + --> tests/fail/no_allow_same_name_twice.rs:18:10 + | +18 | pub enum Code { + | ^^^^ +... +22 | Foo, + | --- not covered + = note: the matched value is of type `&Code` + = note: this error originates in the derive macro `Debug` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: unreachable pattern + --> tests/fail/no_allow_same_name_twice.rs:18:10 + | +18 | pub enum Code { + | __________^ + | |__________| +19 | || #[mh(code = 0x0, hasher = FooHasher)] +20 | || Foo, + | ||_______- matches all the relevant values +21 | | #[mh(code = 0x1, hasher = FooHasher)] +22 | | Foo, + | |________^ no value can reach this + | + = note: `#[warn(unreachable_patterns)]` on by default diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/multihash.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/multihash.rs new file mode 100644 index 000000000000..f723ef5ea971 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/multihash.rs @@ -0,0 +1,29 @@ +use multihash_derive::{Hasher, MultihashDigest}; + +#[test] +fn ui() { + let t = trybuild::TestCases::new(); + t.pass("tests/pass/*.rs"); + t.compile_fail("tests/fail/*.rs"); +} + +#[test] +fn uses_correct_hasher() { + #[derive(Clone, Debug, Eq, PartialEq, Copy, MultihashDigest)] + #[mh(alloc_size = 32)] + pub enum Code { + /// Multihash array for hash function. 
+ #[mh(code = 0x38b64f, hasher = multihash_codetable::Strobe256)] + Strobe256, + } + + let multihash1 = Code::Strobe256.digest(b"foobar"); + + let mut hasher = multihash_codetable::Strobe256::default(); + hasher.update(b"foobar"); + let digest = hasher.finalize(); + + let multihash2 = Multihash::wrap(0x38b64f, digest).unwrap(); + + assert_eq!(multihash1, multihash2) +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/pass/derive.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/pass/derive.rs new file mode 100644 index 000000000000..c28352de25d6 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-0.9.1/tests/pass/derive.rs @@ -0,0 +1,17 @@ +use multihash_derive::MultihashDigest; + +#[derive(Clone, Debug, Eq, PartialEq, Copy, MultihashDigest)] +#[mh(alloc_size = 32)] +pub enum Code { + /// Multihash array for hash function. + #[mh(code = 0x38b64f, hasher = multihash_codetable::Strobe256)] + Strobe256, +} + +fn main() { + assert_multihash_size_32(Code::Strobe256.digest(&[])); +} + +fn assert_multihash_size_32(_mh: multihash_derive::Multihash<32>) { + +} diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/.cargo_vcs_info.json new file mode 100644 index 000000000000..b8de23d1cab5 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "4c0ef5268355308d7f083482dad1c81318db4f6b" + }, + "path_in_vcs": "derive-impl" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/Cargo.toml new file mode 100644 index 000000000000..34a42bad1e05 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/Cargo.toml @@ -0,0 +1,47 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "multihash-derive-impl" +version = "0.1.2" +authors = ["David Craven "] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Internal proc-macro crate for the MultihashDigest derive" +readme = false +license = "MIT" +repository = "https://github.com/multiformats/rust-multihash" +resolver = "2" + +[lib] +name = "multihash_derive_impl" +path = "src/lib.rs" +proc-macro = true + +[dependencies.proc-macro-crate] +version = "3.1.0" + +[dependencies.proc-macro2] +version = "1.0.24" +features = ["span-locations"] + +[dependencies.quote] +version = "1.0.7" + +[dependencies.syn] +version = "2.0.66" + +[dependencies.synstructure] +version = "0.13.1" diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/Cargo.toml.orig new file mode 100644 index 000000000000..1cd7637f39bc --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/Cargo.toml.orig @@ -0,0 +1,18 @@ +[package] +name = "multihash-derive-impl" +version = "0.1.2" +authors = ["David Craven "] +edition = "2018" +description = "Internal proc-macro crate for the MultihashDigest derive" +license = "MIT" +repository = "https://github.com/multiformats/rust-multihash" + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = { version = "1.0.24", features = ["span-locations"] } +proc-macro-crate = "3.1.0" +quote = "1.0.7" +syn = "2.0.66" +synstructure = "0.13.1" diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/LICENSE b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/LICENSE new file mode 100644 index 000000000000..233fd7bd7d5a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/LICENSE @@ -0,0 +1,21 @@ +The MIT License + +Copyright (C) 2015-2016 Friedel Ziegelmayer + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Status API Training Shop Blog About Pricing diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/lib.rs new file mode 100644 index 000000000000..abad0a0e7e2c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/lib.rs @@ -0,0 +1,35 @@ +//! This is an internal crate that implements the actual `MultihashDigest` derive. +//! +//! 
The `multihash-derive` crate acts as a facade and defines additional symbols that our derive depends on. +//! For example, the actual trait that we are deriving `MultihashDigest`, as well as the `Hasher` trait and +//! the `UnsupportedCode` error type. + +extern crate proc_macro; + +mod multihash; +mod utils; + +use proc_macro::TokenStream; +use synstructure::macros::{parse, DeriveInput}; +use synstructure::{MacroResult, Structure}; + +#[proc_macro_derive(Multihash, attributes(mh))] +#[allow(non_snake_case)] +#[deprecated(since = "0.8.1", note = "Use `MultihashDigest` derive instead.")] +pub fn Multihash(i: TokenStream) -> TokenStream { + match parse::<DeriveInput>(i) { + Ok(p) => match Structure::try_new(&p) { + Ok(s) => multihash::multihash(s).into_stream(), + Err(e) => e.to_compile_error().into(), + }, + Err(e) => e.to_compile_error().into(), + } +} + +/// Custom derive for the `MultihashDigest` trait. +#[proc_macro_derive(MultihashDigest, attributes(mh))] +#[allow(non_snake_case)] +pub fn MultihashDigest(i: TokenStream) -> TokenStream { + #[allow(deprecated)] + Multihash(i) +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/multihash.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/multihash.rs new file mode 100644 index 000000000000..0250be687544 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/multihash.rs @@ -0,0 +1,263 @@ +use std::collections::HashSet; +use std::convert::TryFrom; + +use crate::utils; +use proc_macro2::{Span, TokenStream}; +use quote::{quote, ToTokens}; +use syn::parse::{Parse, ParseStream}; +use syn::spanned::Spanned; +use synstructure::{Structure, VariantInfo}; + +mod kw { + use syn::custom_keyword; + + custom_keyword!(code); + custom_keyword!(hasher); + custom_keyword!(mh); + custom_keyword!(alloc_size); +} + +/// Attributes for the enum items. +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] +enum MhAttr { + Code(utils::Attr<kw::code, syn::Expr>), + Hasher(utils::Attr<kw::hasher, Box<syn::Type>>), +} + +impl Parse for MhAttr { + fn parse(input: ParseStream) -> syn::Result<Self> { + if input.peek(kw::code) { + Ok(MhAttr::Code(input.parse()?)) + } else if input.peek(kw::hasher) { + Ok(MhAttr::Hasher(input.parse()?)) + } else { + Err(syn::Error::new(input.span(), "unknown attribute")) + } + } +} + +/// Attributes of the top-level derive.
+#[derive(Debug)] +enum DeriveAttr { + AllocSize(utils::Attr<kw::alloc_size, syn::LitInt>), +} + +impl Parse for DeriveAttr { + fn parse(input: ParseStream) -> syn::Result<Self> { + if input.peek(kw::alloc_size) { + Ok(Self::AllocSize(input.parse()?)) + } else { + Err(syn::Error::new(input.span(), "unknown attribute")) + } + } +} + +struct Params { + code_enum: syn::Ident, +} + +#[derive(Debug)] +struct Hash { + ident: syn::Ident, + code: syn::Expr, + hasher: Box<syn::Type>, +} + +impl Hash { + fn code_into_u64(&self, params: &Params) -> TokenStream { + let ident = &self.ident; + let code_enum = &params.code_enum; + let code = &self.code; + quote!(#code_enum::#ident => #code) + } + + fn code_from_u64(&self) -> TokenStream { + let ident = &self.ident; + let code = &self.code; + quote!(#code => Ok(Self::#ident)) + } + + fn code_digest(&self) -> TokenStream { + let ident = &self.ident; + let hasher = &self.hasher; + let code = &self.code; + quote!(Self::#ident => { + let mut hasher = #hasher::default(); + hasher.update(input); + Multihash::wrap(#code, hasher.finalize()).unwrap() + }) + } +} + +impl<'a> TryFrom<&'a VariantInfo<'a>> for Hash { + type Error = syn::Error; + fn try_from(bi: &'a VariantInfo<'a>) -> Result<Self, Self::Error> { + let mut code = None; + let mut hasher = None; + for attr in bi.ast().attrs { + let attr: Result<utils::Attrs<MhAttr>, _> = syn::parse2(attr.meta.to_token_stream()); + if let Ok(attr) = attr { + for attr in attr.attrs { + match attr { + MhAttr::Code(attr) => code = Some(attr.value), + MhAttr::Hasher(attr) => hasher = Some(attr.value), + } + } + } + } + + let ident = bi.ast().ident.clone(); + let code = code.ok_or_else(|| -> syn::Error { + let msg = "Missing code attribute: e.g. #[mh(code = multihash::SHA3_256)]"; + #[cfg(test)] + panic!("{}", msg); + #[cfg(not(test))] + syn::Error::new(bi.ast().ident.span(), msg) + })?; + let hasher = hasher.ok_or_else(|| -> syn::Error { + let msg = "Missing hasher attribute: e.g. #[mh(hasher = multihash::Sha2_256)]"; + #[cfg(test)] + panic!("{}", msg); + #[cfg(not(test))] + syn::Error::new(bi.ast().ident.span(), msg) + })?; + Ok(Self { + ident, + code, + hasher, + }) + } +} + +/// Parse top-level enum [#mh()] attributes. +/// +/// Returns the `alloc_size` and whether errors regarding to `alloc_size` should be reported or not. +fn parse_code_enum_attrs(ast: &syn::DeriveInput) -> syn::Result<syn::LitInt> { + let mut alloc_size = None; + + for attr in &ast.attrs { + let derive_attrs: Result<utils::Attrs<DeriveAttr>, _> = + syn::parse2(attr.meta.to_token_stream()); + + if let Ok(derive_attrs) = derive_attrs { + for derive_attr in derive_attrs.attrs { + match derive_attr { + DeriveAttr::AllocSize(alloc_size_attr) => { + alloc_size = Some(alloc_size_attr.value) + } + } + } + } + } + alloc_size.ok_or_else(|| -> syn::Error { + let msg = "enum is missing `alloc_size` attribute: e.g. #[mh(alloc_size = 64)]"; + #[cfg(test)] + panic!("{}", msg); + #[cfg(not(test))] + syn::Error::new(ast.span(), msg) + }) +} + +/// Return an error if the same code is used several times. +/// +/// This only checks for string equality, though this should still catch most errors caused by +/// copy and pasting.
+fn check_error_code_duplicates(hashes: &[Hash]) -> Result<(), syn::Error> { + // Use a temporary store to determine whether a certain value is unique or not + let mut uniq = HashSet::new(); + + let mut errors = hashes.iter().filter_map(|hash| -> Option<syn::Error> { + let code = &hash.code; + // It's a duplicate + if uniq.insert(code) { + return None; + } + + let already_defined = uniq.get(code).unwrap(); + let line = already_defined.to_token_stream().span().start().line; + + let msg = format!( + "the #mh(code) attribute `{}` is defined multiple times, previous definition at line {}", + quote!(#code), line + ); + + #[cfg(test)] + panic!("{}", msg); + #[cfg(not(test))] + Some(syn::Error::new(hash.code.span(), msg)) + }); + if let Some(mut error) = errors.next() { + error.extend(errors); + Err(error) + } else { + Ok(()) + } +} + +pub fn multihash(s: Structure) -> TokenStream { + match multihash_inner(s) { + Ok(ts) => ts, + Err(e) => e.to_compile_error(), + } +} +fn multihash_inner(s: Structure) -> syn::Result<TokenStream> { + let mh_crate = + utils::use_crate("multihash-derive").map_err(|e| syn::Error::new(Span::call_site(), e))?; + let code_enum = &s.ast().ident; + let alloc_size = parse_code_enum_attrs(s.ast())?; + let hashes: Vec<_> = s + .variants() + .iter() + .map(Hash::try_from) + .collect::<Result<_, _>>()?; + + check_error_code_duplicates(&hashes)?; + + let params = Params { + code_enum: code_enum.clone(), + }; + + let code_into_u64 = hashes.iter().map(|h| h.code_into_u64(&params)); + let code_from_u64 = hashes.iter().map(|h| h.code_from_u64()); + let code_digest = hashes.iter().map(|h| h.code_digest()); + + Ok(quote! { + /// A Multihash with the same allocated size as the Multihashes produces by this derive. + pub type Multihash = #mh_crate::Multihash<#alloc_size>; + + impl #mh_crate::MultihashDigest<#alloc_size> for #code_enum { + fn digest(&self, input: &[u8]) -> Multihash { + use #mh_crate::Hasher; + match self { + #(#code_digest,)* + _ => unreachable!(), + } + } + + fn wrap(&self, digest: &[u8]) -> Result<Multihash, #mh_crate::Error> { + Multihash::wrap((*self).into(), digest) + } + } + + impl From<#code_enum> for u64 { + fn from(code: #code_enum) -> Self { + match code { + #(#code_into_u64,)* + _ => unreachable!(), + } + } + } + + impl core::convert::TryFrom<u64> for #code_enum { + type Error = #mh_crate::UnsupportedCode; + + fn try_from(code: u64) -> Result<Self, Self::Error> { + match code { + #(#code_from_u64,)* + _ => Err(#mh_crate::UnsupportedCode(code)) + } + } + } + }) +} diff --git a/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/utils.rs b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/utils.rs new file mode 100644 index 000000000000..fcf288b01a3f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/multihash-derive-impl-0.1.2/src/utils.rs @@ -0,0 +1,60 @@ +use proc_macro2::Span; +use proc_macro_crate::{crate_name, FoundCrate}; +use syn::parse::{Parse, ParseStream}; +use syn::punctuated::Punctuated; +use syn::Error; + +pub fn use_crate(name: &str) -> Result<syn::Ident, Error> { + match crate_name(name) { + Ok(FoundCrate::Name(krate)) => Ok(syn::Ident::new(&krate, Span::call_site())), + Ok(FoundCrate::Itself) => Ok(syn::Ident::new("crate", Span::call_site())), + Err(err) => Err(Error::new(Span::call_site(), err)), + } +} + +#[derive(Debug)] +pub(crate) struct Attrs<A> { + // The information is part of the parsed AST, we preserve it even if it isn't used yet. + #[allow(dead_code)] + pub ident: syn::Ident, + // The information is part of the parsed AST, we preserve it even if it isn't used yet.
+ #[allow(dead_code)] + pub paren: syn::token::Paren, + pub attrs: Punctuated<A, syn::token::Comma>, +} + +impl<A: Parse> Parse for Attrs<A> { + fn parse(input: ParseStream) -> syn::Result<Self> { + // Maybe check if ident == "mh" + let ident = input.parse()?; + let content; + let paren = syn::parenthesized!(content in input); + let attrs = content.parse_terminated(A::parse, syn::token::Comma)?; + Ok(Self { + ident, + paren, + attrs, + }) + } +} + +#[derive(Debug)] +pub(crate) struct Attr<K, V> { + // The information is part of the parsed AST, we preserve it even if it isn't used yet. + #[allow(dead_code)] + pub key: K, + // The information is part of the parsed AST, we preserve it even if it isn't used yet. + #[allow(dead_code)] + pub eq: syn::token::Eq, + pub value: V, +} + +impl<K: Parse, V: Parse> Parse for Attr<K, V> { + fn parse(input: ParseStream) -> syn::Result<Self> { + Ok(Self { + key: input.parse()?, + eq: input.parse()?, + value: input.parse()?, + }) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.cargo_vcs_info.json deleted file mode 100644 index c0d43392ab80..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "cf2d0d12fc6ee88f51f7f81a9d1e6ded68c11b41" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.github/workflows/rust.yml b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.github/workflows/rust.yml deleted file mode 100644 index 75bc87df1113..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.github/workflows/rust.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Rust - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -env: - CARGO_TERM_COLOR: always - -jobs: - test: - name: cargo test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: dtolnay/rust-toolchain@1.42.0 - - run: cargo test --all - diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/Cargo.toml deleted file mode 100644 index 02418c02afb4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents.
- -[package] -edition = "2018" -name = "proc-macro-crate" -version = "1.1.3" -authors = ["Bastian Köcher "] -description = "Replacement for crate (macro_rules keyword) in proc-macros\n" -documentation = "https://docs.rs/proc-macro-crate" -readme = "./README.md" -keywords = ["macro-rules", "crate", "macro", "proc-macro"] -categories = ["development-tools::procedural-macro-helpers"] -license = "Apache-2.0/MIT" -repository = "https://github.com/bkchr/proc-macro-crate" -[dependencies.thiserror] -version = "1.0.24" - -[dependencies.toml] -version = "0.5.2" -[dev-dependencies.proc-macro2] -version = "1.0.18" - -[dev-dependencies.quote] -version = "1.0.7" - -[dev-dependencies.syn] -version = "1.0.33" diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/Cargo.toml.orig deleted file mode 100644 index de60f4e04892..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/Cargo.toml.orig +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "proc-macro-crate" -version = "1.1.3" -authors = ["Bastian Köcher "] -edition = "2018" -categories = [ "development-tools::procedural-macro-helpers" ] -documentation = "https://docs.rs/proc-macro-crate" -repository = "https://github.com/bkchr/proc-macro-crate" -keywords = [ "macro-rules", "crate", "macro", "proc-macro" ] -license = "Apache-2.0/MIT" -description = """ -Replacement for crate (macro_rules keyword) in proc-macros -""" -readme = "./README.md" - -[dependencies] -toml = "0.5.2" -thiserror = "1.0.24" - -[dev-dependencies] -quote = "1.0.7" -syn = "1.0.33" -proc-macro2 = "1.0.18" diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/README.md b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/README.md deleted file mode 100644 index bfda755d956d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# proc-macro-crate - - -[![](https://docs.rs/proc-macro-crate/badge.svg)](https://docs.rs/proc-macro-crate/) [![](https://img.shields.io/crates/v/proc-macro-crate.svg)](https://crates.io/crates/proc-macro-crate) [![](https://img.shields.io/crates/d/proc-macro-crate.png)](https://crates.io/crates/proc-macro-crate) [![Build Status](https://travis-ci.org/bkchr/proc-macro-crate.png?branch=master)](https://travis-ci.org/bkchr/proc-macro-crate) - -Providing support for `$crate` in procedural macros. - -* [Introduction](#introduction) -* [Example](#example) -* [License](#license) - -### Introduction - -In `macro_rules!` `$crate` is used to get the path of the crate where a macro is declared in. In -procedural macros there is currently no easy way to get this path. A common hack is to import the -desired crate with a know name and use this. However, with rust edition 2018 and dropping -`extern crate` declarations from `lib.rs`, people start to rename crates in `Cargo.toml` directly. -However, this breaks importing the crate, as the proc-macro developer does not know the renamed -name of the crate that should be imported. - -This crate provides a way to get the name of a crate, even if it renamed in `Cargo.toml`. For this -purpose a single function `crate_name` is provided. This function needs to be called in the context -of a proc-macro with the name of the desired crate. `CARGO_MANIFEST_DIR` will be used to find the -current active `Cargo.toml` and this `Cargo.toml` is searched for the desired crate. 
- -### Example - -```rust -use quote::quote; -use syn::Ident; -use proc_macro2::Span; -use proc_macro_crate::{crate_name, FoundCrate}; - -fn import_my_crate() { - let found_crate = crate_name("my-crate").expect("my-crate is present in `Cargo.toml`"); - - match found_crate { - FoundCrate::Itself => quote!( crate::Something ), - FoundCrate::Name(name) => { - let ident = Ident::new(&name, Span::call_site()); - quote!( #ident::Something ) - } - }; -} - -``` - -### License - -Licensed under either of - - * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - - * [MIT license](http://opensource.org/licenses/MIT) - -at your option. - -License: Apache-2.0/MIT diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/src/lib.rs deleted file mode 100644 index bbc419b2f949..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/src/lib.rs +++ /dev/null @@ -1,340 +0,0 @@ -/*! - -[![](https://docs.rs/proc-macro-crate/badge.svg)](https://docs.rs/proc-macro-crate/) [![](https://img.shields.io/crates/v/proc-macro-crate.svg)](https://crates.io/crates/proc-macro-crate) [![](https://img.shields.io/crates/d/proc-macro-crate.png)](https://crates.io/crates/proc-macro-crate) [![Build Status](https://travis-ci.org/bkchr/proc-macro-crate.png?branch=master)](https://travis-ci.org/bkchr/proc-macro-crate) - -Providing support for `$crate` in procedural macros. - -* [Introduction](#introduction) -* [Example](#example) -* [License](#license) - -## Introduction - -In `macro_rules!` `$crate` is used to get the path of the crate where a macro is declared in. In -procedural macros there is currently no easy way to get this path. A common hack is to import the -desired crate with a know name and use this. However, with rust edition 2018 and dropping -`extern crate` declarations from `lib.rs`, people start to rename crates in `Cargo.toml` directly. -However, this breaks importing the crate, as the proc-macro developer does not know the renamed -name of the crate that should be imported. - -This crate provides a way to get the name of a crate, even if it renamed in `Cargo.toml`. For this -purpose a single function `crate_name` is provided. This function needs to be called in the context -of a proc-macro with the name of the desired crate. `CARGO_MANIFEST_DIR` will be used to find the -current active `Cargo.toml` and this `Cargo.toml` is searched for the desired crate. - -## Example - -``` -use quote::quote; -use syn::Ident; -use proc_macro2::Span; -use proc_macro_crate::{crate_name, FoundCrate}; - -fn import_my_crate() { - let found_crate = crate_name("my-crate").expect("my-crate is present in `Cargo.toml`"); - - match found_crate { - FoundCrate::Itself => quote!( crate::Something ), - FoundCrate::Name(name) => { - let ident = Ident::new(&name, Span::call_site()); - quote!( #ident::Something ) - } - }; -} - -# fn main() {} -``` - -## License - -Licensed under either of - - * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) - - * [MIT license](http://opensource.org/licenses/MIT) - -at your option. -*/ - -use std::{ - collections::HashMap, - env, - fs::File, - io::{self, Read}, - path::{Path, PathBuf}, -}; - -use toml::{self, value::Table}; - -type CargoToml = HashMap; - -/// Error type used by this crate. 
-#[derive(Debug, thiserror::Error)] -pub enum Error { - #[error("Could not find `Cargo.toml` in manifest dir: `{0}`.")] - NotFound(PathBuf), - #[error("`CARGO_MANIFEST_DIR` env variable not set.")] - CargoManifestDirNotSet, - #[error("Could not read `{path}`.")] - CouldNotRead { path: PathBuf, source: io::Error }, - #[error("Invalid toml file.")] - InvalidToml { source: toml::de::Error }, - #[error("Could not find `{crate_name}` in `dependencies` or `dev-dependencies` in `{path}`!")] - CrateNotFound { crate_name: String, path: PathBuf }, -} - -/// The crate as found by [`crate_name`]. -#[derive(Debug, PartialEq, Clone, Eq)] -pub enum FoundCrate { - /// The searched crate is this crate itself. - Itself, - /// The searched crate was found with this name. - Name(String), -} - -/// Find the crate name for the given `orig_name` in the current `Cargo.toml`. -/// -/// `orig_name` should be the original name of the searched crate. -/// -/// The current `Cargo.toml` is determined by taking `CARGO_MANIFEST_DIR/Cargo.toml`. -/// -/// # Returns -/// -/// - `Ok(orig_name)` if the crate was found, but not renamed in the `Cargo.toml`. -/// - `Ok(RENAMED)` if the crate was found, but is renamed in the `Cargo.toml`. `RENAMED` will be -/// the renamed name. -/// - `Err` if an error occurred. -/// -/// The returned crate name is sanitized in such a way that it is a valid rust identifier. Thus, -/// it is ready to be used in `extern crate` as identifier. -pub fn crate_name(orig_name: &str) -> Result { - let manifest_dir = - PathBuf::from(env::var("CARGO_MANIFEST_DIR").map_err(|_| Error::CargoManifestDirNotSet)?); - - let cargo_toml_path = manifest_dir.join("Cargo.toml"); - - if !cargo_toml_path.exists() { - return Err(Error::NotFound(manifest_dir.into())); - } - - let cargo_toml = open_cargo_toml(&cargo_toml_path)?; - - extract_crate_name(orig_name, cargo_toml, &cargo_toml_path) -} - -/// Make sure that the given crate name is a valid rust identifier. -fn sanitize_crate_name>(name: S) -> String { - name.as_ref().replace("-", "_") -} - -/// Open the given `Cargo.toml` and parse it into a hashmap. -fn open_cargo_toml(path: &Path) -> Result { - let mut content = String::new(); - File::open(path) - .map_err(|e| Error::CouldNotRead { - source: e, - path: path.into(), - })? - .read_to_string(&mut content) - .map_err(|e| Error::CouldNotRead { - source: e, - path: path.into(), - })?; - toml::from_str(&content).map_err(|e| Error::InvalidToml { source: e }) -} - -/// Extract the crate name for the given `orig_name` from the given `Cargo.toml` by checking the -/// `dependencies` and `dev-dependencies`. -/// -/// Returns `Ok(orig_name)` if the crate is not renamed in the `Cargo.toml` or otherwise -/// the renamed identifier. 
-fn extract_crate_name( - orig_name: &str, - mut cargo_toml: CargoToml, - cargo_toml_path: &Path, -) -> Result { - if let Some(toml::Value::Table(t)) = cargo_toml.get("package") { - if let Some(toml::Value::String(s)) = t.get("name") { - if s == orig_name { - if std::env::var_os("CARGO_TARGET_TMPDIR").is_none() { - // We're running for a library/binary crate - return Ok(FoundCrate::Itself); - } else { - // We're running for an integration test - return Ok(FoundCrate::Name(sanitize_crate_name(orig_name))); - } - } - } - } - - if let Some(name) = ["dependencies", "dev-dependencies"] - .iter() - .find_map(|k| search_crate_at_key(k, orig_name, &mut cargo_toml)) - { - return Ok(FoundCrate::Name(sanitize_crate_name(name))); - } - - // Start searching `target.xy.dependencies` - if let Some(name) = cargo_toml - .remove("target") - .and_then(|t| t.try_into::().ok()) - .and_then(|t| { - t.values() - .filter_map(|v| v.as_table()) - .flat_map(|t| { - t.get("dependencies") - .into_iter() - .chain(t.get("dev-dependencies").into_iter()) - }) - .filter_map(|t| t.as_table()) - .find_map(|t| extract_crate_name_from_deps(orig_name, t.clone())) - }) - { - return Ok(FoundCrate::Name(sanitize_crate_name(name))); - } - - Err(Error::CrateNotFound { - crate_name: orig_name.into(), - path: cargo_toml_path.into(), - }) -} - -/// Search the `orig_name` crate at the given `key` in `cargo_toml`. -fn search_crate_at_key(key: &str, orig_name: &str, cargo_toml: &mut CargoToml) -> Option { - cargo_toml - .remove(key) - .and_then(|v| v.try_into::
().ok()) - .and_then(|t| extract_crate_name_from_deps(orig_name, t)) -} - -/// Extract the crate name from the given dependencies. -/// -/// Returns `Some(orig_name)` if the crate is not renamed in the `Cargo.toml` or otherwise -/// the renamed identifier. -fn extract_crate_name_from_deps(orig_name: &str, deps: Table) -> Option { - for (key, value) in deps.into_iter() { - let renamed = value - .try_into::
() - .ok() - .and_then(|t| t.get("package").cloned()) - .map(|t| t.as_str() == Some(orig_name)) - .unwrap_or(false); - - if key == orig_name || renamed { - return Some(key.clone()); - } - } - - None -} - -#[cfg(test)] -mod tests { - use super::*; - - macro_rules! create_test { - ( - $name:ident, - $cargo_toml:expr, - $( $result:tt )* - ) => { - #[test] - fn $name() { - let cargo_toml = toml::from_str($cargo_toml).expect("Parses `Cargo.toml`"); - let path = PathBuf::from("test-path"); - - match extract_crate_name("my_crate", cargo_toml, &path) { - $( $result )* => (), - o => panic!("Invalid result: {:?}", o), - } - } - }; - } - - create_test! { - deps_with_crate, - r#" - [dependencies] - my_crate = "0.1" - "#, - Ok(FoundCrate::Name(name)) if name == "my_crate" - } - - create_test! { - dev_deps_with_crate, - r#" - [dev-dependencies] - my_crate = "0.1" - "#, - Ok(FoundCrate::Name(name)) if name == "my_crate" - } - - create_test! { - deps_with_crate_renamed, - r#" - [dependencies] - cool = { package = "my_crate", version = "0.1" } - "#, - Ok(FoundCrate::Name(name)) if name == "cool" - } - - create_test! { - deps_with_crate_renamed_second, - r#" - [dependencies.cool] - package = "my_crate" - version = "0.1" - "#, - Ok(FoundCrate::Name(name)) if name == "cool" - } - - create_test! { - deps_empty, - r#" - [dependencies] - "#, - Err(Error::CrateNotFound { - crate_name, - path, - }) if crate_name == "my_crate" && path.display().to_string() == "test-path" - } - - create_test! { - crate_not_found, - r#" - [dependencies] - serde = "1.0" - "#, - Err(Error::CrateNotFound { - crate_name, - path, - }) if crate_name == "my_crate" && path.display().to_string() == "test-path" - } - - create_test! { - target_dependency, - r#" - [target.'cfg(target_os="android")'.dependencies] - my_crate = "0.1" - "#, - Ok(FoundCrate::Name(name)) if name == "my_crate" - } - - create_test! { - target_dependency2, - r#" - [target.x86_64-pc-windows-gnu.dependencies] - my_crate = "0.1" - "#, - Ok(FoundCrate::Name(name)) if name == "my_crate" - } - - create_test! 
{ - own_crate, - r#" - [package] - name = "my_crate" - "#, - Ok(FoundCrate::Itself) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.cargo_vcs_info.json new file mode 100644 index 000000000000..9cfad8b99b70 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "a5939f3fbf94279b45902119d97f881fefca6a0d" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.github/workflows/rust.yml b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.github/workflows/rust.yml new file mode 100644 index 000000000000..6d77c7722952 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.github/workflows/rust.yml @@ -0,0 +1,46 @@ +name: Rust + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +env: + CARGO_TERM_COLOR: always + +jobs: + test: + name: cargo test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: dtolnay/rust-toolchain@stable + - run: cargo test --all + msrv: + name: "Check MSRV: 1.67.0" + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: 1.67.0 # MSRV + - uses: Swatinem/rust-cache@v2 + - name: Default features + run: cargo test --all + rustfmt: + name: rustfmt + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@nightly + with: + toolchain: nightly + components: rustfmt + - uses: Swatinem/rust-cache@v2 + - name: Check formatting + run: cargo fmt --all -- --check diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.gitignore b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.gitignore similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.gitignore rename to third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.gitignore diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.rustfmt.toml b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.rustfmt.toml new file mode 100644 index 000000000000..4d68b555337c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.rustfmt.toml @@ -0,0 +1,23 @@ +# Basic +edition = "2021" +max_width = 100 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" +reorder_imports = true +# Consistency +newline_style = "Unix" +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true +# Format comments +comment_width = 100 +wrap_comments = true diff --git 
a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.travis.yml b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.travis.yml similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/.travis.yml rename to third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/.travis.yml diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/Cargo.toml new file mode 100644 index 000000000000..b0d604b2f3f2 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/Cargo.toml @@ -0,0 +1,56 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.67.0" +name = "proc-macro-crate" +version = "3.2.0" +authors = ["Bastian Köcher "] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +Replacement for crate (macro_rules keyword) in proc-macros +""" +documentation = "https://docs.rs/proc-macro-crate" +readme = "README.md" +keywords = [ + "macro-rules", + "crate", + "macro", + "proc-macro", +] +categories = ["development-tools::procedural-macro-helpers"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/bkchr/proc-macro-crate" + +[lib] +name = "proc_macro_crate" +path = "src/lib.rs" + +[[test]] +name = "workspace_deps" +path = "tests/workspace_deps.rs" + +[dependencies.toml_edit] +version = "0.22.20" + +[dev-dependencies.proc-macro2] +version = "1.0.86" + +[dev-dependencies.quote] +version = "1.0.37" + +[dev-dependencies.syn] +version = "2.0.76" diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/Cargo.toml.orig new file mode 100644 index 000000000000..0e94526c8e84 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/Cargo.toml.orig @@ -0,0 +1,23 @@ +[package] +name = "proc-macro-crate" +version = "3.2.0" +authors = ["Bastian Köcher "] +edition = "2021" +categories = ["development-tools::procedural-macro-helpers"] +documentation = "https://docs.rs/proc-macro-crate" +repository = "https://github.com/bkchr/proc-macro-crate" +keywords = ["macro-rules", "crate", "macro", "proc-macro"] +license = "MIT OR Apache-2.0" +description = """ +Replacement for crate (macro_rules keyword) in proc-macros +""" +readme = "./README.md" +rust-version = "1.67.0" + +[dependencies] +toml_edit = "0.22.20" + +[dev-dependencies] +quote = "1.0.37" +syn = "2.0.76" +proc-macro2 = "1.0.86" diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/LICENSE-APACHE similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/LICENSE-APACHE rename to third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/LICENSE-APACHE diff --git 
a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/LICENSE-MIT similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/proc-macro-crate-1.1.3/LICENSE-MIT rename to third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/LICENSE-MIT diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/README.md b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/README.md new file mode 100644 index 000000000000..ec09243aff32 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/README.md @@ -0,0 +1,87 @@ +# proc-macro-crate + + +[![](https://docs.rs/proc-macro-crate/badge.svg)](https://docs.rs/proc-macro-crate/) [![](https://img.shields.io/crates/v/proc-macro-crate.svg)](https://crates.io/crates/proc-macro-crate) [![](https://img.shields.io/crates/d/proc-macro-crate.png)](https://crates.io/crates/proc-macro-crate) [![Build Status](https://travis-ci.org/bkchr/proc-macro-crate.png?branch=master)](https://travis-ci.org/bkchr/proc-macro-crate) + +Providing support for `$crate` in procedural macros. + +* [Introduction](#introduction) +* [Example](#example) +* [License](#license) + +### Introduction + +In `macro_rules!` `$crate` is used to get the path of the crate where a macro is declared in. In +procedural macros there is currently no easy way to get this path. A common hack is to import the +desired crate with a know name and use this. However, with rust edition 2018 and dropping +`extern crate` declarations from `lib.rs`, people start to rename crates in `Cargo.toml` directly. +However, this breaks importing the crate, as the proc-macro developer does not know the renamed +name of the crate that should be imported. + +This crate provides a way to get the name of a crate, even if it renamed in `Cargo.toml`. For this +purpose a single function `crate_name` is provided. This function needs to be called in the context +of a proc-macro with the name of the desired crate. `CARGO_MANIFEST_DIR` will be used to find the +current active `Cargo.toml` and this `Cargo.toml` is searched for the desired crate. + +### Example + +```rust +use quote::quote; +use syn::Ident; +use proc_macro2::Span; +use proc_macro_crate::{crate_name, FoundCrate}; + +fn import_my_crate() { + let found_crate = crate_name("my-crate").expect("my-crate is present in `Cargo.toml`"); + + match found_crate { + FoundCrate::Itself => quote!( crate::Something ), + FoundCrate::Name(name) => { + let ident = Ident::new(&name, Span::call_site()); + quote!( #ident::Something ) + } + }; +} + +``` + +### Edge cases + +There are multiple edge cases when it comes to determining the correct crate. If you for example +import a crate as its own dependency, like this: + +```toml +[package] +name = "my_crate" + +[dev-dependencies] +my_crate = { version = "0.1", features = [ "test-feature" ] } +``` + +The crate will return `FoundCrate::Itself` and you will not be able to find the other instance +of your crate in `dev-dependencies`. Other similar cases are when one crate is imported multiple +times: + +```toml +[package] +name = "my_crate" + +[dependencies] +some-crate = { version = "0.5" } +some-crate-old = { package = "some-crate", version = "0.1" } +``` + +When searching for `some-crate` in this `Cargo.toml` it will return `FoundCrate::Name("some_old_crate")`, +aka the last definition of the crate in the `Cargo.toml`. 
+ +### License + +Licensed under either of + + * [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) + + * [MIT license](https://opensource.org/licenses/MIT) + +at your option. + +License: MIT OR Apache-2.0 diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/src/lib.rs new file mode 100644 index 000000000000..fe46eb32a7fb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/src/lib.rs @@ -0,0 +1,555 @@ +/*! + +[![](https://docs.rs/proc-macro-crate/badge.svg)](https://docs.rs/proc-macro-crate/) [![](https://img.shields.io/crates/v/proc-macro-crate.svg)](https://crates.io/crates/proc-macro-crate) [![](https://img.shields.io/crates/d/proc-macro-crate.png)](https://crates.io/crates/proc-macro-crate) [![Build Status](https://travis-ci.org/bkchr/proc-macro-crate.png?branch=master)](https://travis-ci.org/bkchr/proc-macro-crate) + +Providing support for `$crate` in procedural macros. + +* [Introduction](#introduction) +* [Example](#example) +* [License](#license) + +## Introduction + +In `macro_rules!` `$crate` is used to get the path of the crate where a macro is declared in. In +procedural macros there is currently no easy way to get this path. A common hack is to import the +desired crate with a know name and use this. However, with rust edition 2018 and dropping +`extern crate` declarations from `lib.rs`, people start to rename crates in `Cargo.toml` directly. +However, this breaks importing the crate, as the proc-macro developer does not know the renamed +name of the crate that should be imported. + +This crate provides a way to get the name of a crate, even if it renamed in `Cargo.toml`. For this +purpose a single function `crate_name` is provided. This function needs to be called in the context +of a proc-macro with the name of the desired crate. `CARGO_MANIFEST_DIR` will be used to find the +current active `Cargo.toml` and this `Cargo.toml` is searched for the desired crate. + +## Example + +``` +use quote::quote; +use syn::Ident; +use proc_macro2::Span; +use proc_macro_crate::{crate_name, FoundCrate}; + +fn import_my_crate() { + let found_crate = crate_name("my-crate").expect("my-crate is present in `Cargo.toml`"); + + match found_crate { + FoundCrate::Itself => quote!( crate::Something ), + FoundCrate::Name(name) => { + let ident = Ident::new(&name, Span::call_site()); + quote!( #ident::Something ) + } + }; +} + +# fn main() {} +``` + +## Edge cases + +There are multiple edge cases when it comes to determining the correct crate. If you for example +import a crate as its own dependency, like this: + +```toml +[package] +name = "my_crate" + +[dev-dependencies] +my_crate = { version = "0.1", features = [ "test-feature" ] } +``` + +The crate will return `FoundCrate::Itself` and you will not be able to find the other instance +of your crate in `dev-dependencies`. Other similar cases are when one crate is imported multiple +times: + +```toml +[package] +name = "my_crate" + +[dependencies] +some-crate = { version = "0.5" } +some-crate-old = { package = "some-crate", version = "0.1" } +``` + +When searching for `some-crate` in this `Cargo.toml` it will return `FoundCrate::Name("some_old_crate")`, +aka the last definition of the crate in the `Cargo.toml`. 
+ +## License + +Licensed under either of + + * [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) + + * [MIT license](https://opensource.org/licenses/MIT) + +at your option. +*/ + +use std::{ + collections::btree_map::{self, BTreeMap}, + env, fmt, fs, io, + path::{Path, PathBuf}, + process::Command, + sync::Mutex, + time::SystemTime, +}; + +use toml_edit::{DocumentMut, Item, Table, TomlError}; + +/// Error type used by this crate. +pub enum Error { + NotFound(PathBuf), + CargoManifestDirNotSet, + CargoEnvVariableNotSet, + FailedGettingWorkspaceManifestPath, + CouldNotRead { path: PathBuf, source: io::Error }, + InvalidToml { source: TomlError }, + CrateNotFound { crate_name: String, path: PathBuf }, +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::CouldNotRead { source, .. } => Some(source), + Error::InvalidToml { source } => Some(source), + _ => None, + } + } +} + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::NotFound(path) => + write!(f, "Could not find `Cargo.toml` in manifest dir: `{}`.", path.display()), + Error::CargoManifestDirNotSet => + f.write_str("`CARGO_MANIFEST_DIR` env variable not set."), + Error::CouldNotRead { path, .. } => write!(f, "Could not read `{}`.", path.display()), + Error::InvalidToml { .. } => f.write_str("Invalid toml file."), + Error::CrateNotFound { crate_name, path } => write!( + f, + "Could not find `{}` in `dependencies` or `dev-dependencies` in `{}`!", + crate_name, + path.display(), + ), + Error::CargoEnvVariableNotSet => f.write_str("`CARGO` env variable not set."), + Error::FailedGettingWorkspaceManifestPath => + f.write_str("Failed to get the path of the workspace manifest path."), + } + } +} + +/// The crate as found by [`crate_name`]. +#[derive(Debug, PartialEq, Clone, Eq)] +pub enum FoundCrate { + /// The searched crate is this crate itself. + Itself, + /// The searched crate was found with this name. + Name(String), +} + +// In a rustc invocation, there will only ever be one entry in this map, since every crate is +// compiled with its own rustc process. However, the same is not (currently) the case for +// rust-analyzer. +type Cache = BTreeMap<String, CacheEntry>; + +struct CacheEntry { + manifest_ts: SystemTime, + workspace_manifest_ts: SystemTime, + workspace_manifest_path: PathBuf, + crate_names: CrateNames, +} + +type CrateNames = BTreeMap<String, FoundCrate>; + +/// Find the crate name for the given `orig_name` in the current `Cargo.toml`. +/// +/// `orig_name` should be the original name of the searched crate. +/// +/// The current `Cargo.toml` is determined by taking `CARGO_MANIFEST_DIR/Cargo.toml`. +/// +/// # Returns +/// +/// - `Ok(orig_name)` if the crate was found, but not renamed in the `Cargo.toml`. +/// - `Ok(RENAMED)` if the crate was found, but is renamed in the `Cargo.toml`. `RENAMED` will be +/// the renamed name. +/// - `Err` if an error occurred. +/// +/// The returned crate name is sanitized in such a way that it is a valid rust identifier. Thus, +/// it is ready to be used in `extern crate` as identifier.
+pub fn crate_name(orig_name: &str) -> Result<FoundCrate, Error> { + let manifest_dir = env::var("CARGO_MANIFEST_DIR").map_err(|_| Error::CargoManifestDirNotSet)?; + let manifest_path = Path::new(&manifest_dir).join("Cargo.toml"); + + let manifest_ts = cargo_toml_timestamp(&manifest_path)?; + + static CACHE: Mutex<Cache> = Mutex::new(BTreeMap::new()); + let mut cache = CACHE.lock().unwrap(); + + let crate_names = match cache.entry(manifest_dir) { + btree_map::Entry::Occupied(entry) => { + let cache_entry = entry.into_mut(); + let workspace_manifest_path = cache_entry.workspace_manifest_path.as_path(); + let workspace_manifest_ts = cargo_toml_timestamp(&workspace_manifest_path)?; + + // Timestamp changed, rebuild this cache entry. + if manifest_ts != cache_entry.manifest_ts || + workspace_manifest_ts != cache_entry.workspace_manifest_ts + { + *cache_entry = read_cargo_toml( + &manifest_path, + &workspace_manifest_path, + manifest_ts, + workspace_manifest_ts, + )?; + } + + &cache_entry.crate_names + }, + btree_map::Entry::Vacant(entry) => { + // If `workspace_manifest_path` returns `None`, we are probably in a vendored deps + // folder and cargo complaining that we have some package inside a workspace, that isn't + // part of the workspace. In this case we just use the `manifest_path` as the + // `workspace_manifest_path`. + let workspace_manifest_path = + workspace_manifest_path(&manifest_path)?.unwrap_or_else(|| manifest_path.clone()); + let workspace_manifest_ts = cargo_toml_timestamp(&workspace_manifest_path)?; + + let cache_entry = entry.insert(read_cargo_toml( + &manifest_path, + &workspace_manifest_path, + manifest_ts, + workspace_manifest_ts, + )?); + &cache_entry.crate_names + }, + }; + + Ok(crate_names + .get(orig_name) + .ok_or_else(|| Error::CrateNotFound { + crate_name: orig_name.to_owned(), + path: manifest_path, + })? + .clone()) +} + +fn workspace_manifest_path(cargo_toml_manifest: &Path) -> Result<Option<PathBuf>, Error> { + let stdout = Command::new(env::var("CARGO").map_err(|_| Error::CargoEnvVariableNotSet)?) + .arg("locate-project") + .args(&["--workspace", "--message-format=plain"]) + .arg(format!("--manifest-path={}", cargo_toml_manifest.display())) + .output() + .map_err(|_| Error::FailedGettingWorkspaceManifestPath)? + .stdout; + + String::from_utf8(stdout) + .map_err(|_| Error::FailedGettingWorkspaceManifestPath) + .map(|s| { + let path = s.trim(); + + if path.is_empty() { + None + } else { + Some(path.into()) + } + }) +} + +fn cargo_toml_timestamp(manifest_path: &Path) -> Result<SystemTime, Error> { + fs::metadata(manifest_path).and_then(|meta| meta.modified()).map_err(|source| { + if source.kind() == io::ErrorKind::NotFound { + Error::NotFound(manifest_path.to_owned()) + } else { + Error::CouldNotRead { path: manifest_path.to_owned(), source } + } + }) +} + +fn read_cargo_toml( + manifest_path: &Path, + workspace_manifest_path: &Path, + manifest_ts: SystemTime, + workspace_manifest_ts: SystemTime, +) -> Result<CacheEntry, Error> { + let manifest = open_cargo_toml(manifest_path)?; + + let workspace_dependencies = if manifest_path != workspace_manifest_path { + let workspace_manifest = open_cargo_toml(workspace_manifest_path)?; + extract_workspace_dependencies(&workspace_manifest)? + } else { + extract_workspace_dependencies(&manifest)? + }; + + let crate_names = extract_crate_names(&manifest, workspace_dependencies)?; + + Ok(CacheEntry { + manifest_ts, + workspace_manifest_ts, + crate_names, + workspace_manifest_path: workspace_manifest_path.to_path_buf(), + }) +} + +/// Extract all `[workspace.dependencies]`.
+/// +/// Returns a hash map that maps from dep name to the package name. Dep name +/// and package name can be the same if there doesn't exist any rename. +fn extract_workspace_dependencies( + workspace_toml: &DocumentMut, +) -> Result<BTreeMap<String, String>, Error> { + Ok(workspace_dep_tables(&workspace_toml) + .into_iter() + .flatten() + .map(move |(dep_name, dep_value)| { + let pkg_name = dep_value.get("package").and_then(|i| i.as_str()).unwrap_or(dep_name); + + (dep_name.to_owned(), pkg_name.to_owned()) + }) + .collect()) +} + +/// Return an iterator over all `[workspace.dependencies]` +fn workspace_dep_tables(cargo_toml: &DocumentMut) -> Option<&Table> { + cargo_toml + .get("workspace") + .and_then(|w| w.as_table()?.get("dependencies")?.as_table()) +} + +/// Make sure that the given crate name is a valid rust identifier. +fn sanitize_crate_name<S: AsRef<str>>(name: S) -> String { + name.as_ref().replace('-', "_") +} + +/// Open the given `Cargo.toml` and parse it into a hashmap. +fn open_cargo_toml(path: &Path) -> Result<DocumentMut, Error> { + let content = fs::read_to_string(path) + .map_err(|e| Error::CouldNotRead { source: e, path: path.into() })?; + content.parse::<DocumentMut>().map_err(|e| Error::InvalidToml { source: e }) +} + +/// Extract all crate names from the given `Cargo.toml` by checking the `dependencies` and +/// `dev-dependencies`. +fn extract_crate_names( + cargo_toml: &DocumentMut, + workspace_dependencies: BTreeMap<String, String>, +) -> Result<CrateNames, Error> { + let package_name = extract_package_name(cargo_toml); + let root_pkg = package_name.as_ref().map(|name| { + let cr = match env::var_os("CARGO_TARGET_TMPDIR") { + // We're running for a library/binary crate + None => FoundCrate::Itself, + // We're running for an integration test + Some(_) => FoundCrate::Name(sanitize_crate_name(name)), + }; + + (name.to_string(), cr) + }); + + let dep_tables = dep_tables(cargo_toml).chain(target_dep_tables(cargo_toml)); + let dep_pkgs = dep_tables.flatten().filter_map(move |(dep_name, dep_value)| { + let pkg_name = dep_value.get("package").and_then(|i| i.as_str()).unwrap_or(dep_name); + + // We already handle this via `root_pkg` above. + if package_name.as_ref().map_or(false, |n| *n == pkg_name) { + return None + } + + // Check if this is a workspace dependency. + let workspace = dep_value.get("workspace").and_then(|w| w.as_bool()).unwrap_or_default(); + + let pkg_name = workspace + .then(|| workspace_dependencies.get(pkg_name).map(|p| p.as_ref())) + .flatten() + .unwrap_or(pkg_name); + + let cr = FoundCrate::Name(sanitize_crate_name(dep_name)); + + Some((pkg_name.to_owned(), cr)) + }); + + Ok(root_pkg.into_iter().chain(dep_pkgs).collect()) +} + +fn extract_package_name(cargo_toml: &DocumentMut) -> Option<&str> { + cargo_toml.get("package")?.get("name")?.as_str() +} + +fn target_dep_tables(cargo_toml: &DocumentMut) -> impl Iterator<Item = &Table> { + cargo_toml.get("target").into_iter().filter_map(Item::as_table).flat_map(|t| { + t.iter().map(|(_, value)| value).filter_map(Item::as_table).flat_map(dep_tables) + }) +} + +fn dep_tables(table: &Table) -> impl Iterator<Item = &Table> { + table + .get("dependencies") + .into_iter() + .chain(table.get("dev-dependencies")) + .filter_map(Item::as_table) +} + +#[cfg(test)] +mod tests { + use super::*; + + macro_rules!
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    macro_rules! create_test {
+        (
+            $name:ident,
+            $cargo_toml:expr,
+            $workspace_toml:expr,
+            $( $result:tt )*
+        ) => {
+            #[test]
+            fn $name() {
+                let cargo_toml = $cargo_toml.parse::<DocumentMut>()
+                    .expect("Parses `Cargo.toml`");
+                let workspace_cargo_toml = $workspace_toml.parse::<DocumentMut>()
+                    .expect("Parses workspace `Cargo.toml`");
+
+                let workspace_deps = extract_workspace_dependencies(&workspace_cargo_toml)
+                    .expect("Extracts workspace dependencies");
+
+                match extract_crate_names(&cargo_toml, workspace_deps)
+                    .map(|mut map| map.remove("my_crate"))
+                {
+                    $( $result )* => (),
+                    o => panic!("Invalid result: {:?}", o),
+                }
+            }
+        };
+    }
+
+    create_test! {
+        deps_with_crate,
+        r#"
+            [dependencies]
+            my_crate = "0.1"
+        "#,
+        "",
+        Ok(Some(FoundCrate::Name(name))) if name == "my_crate"
+    }
+
+    create_test! {
+        dev_deps_with_crate,
+        r#"
+            [dev-dependencies]
+            my_crate = "0.1"
+        "#,
+        "",
+        Ok(Some(FoundCrate::Name(name))) if name == "my_crate"
+    }
+
+    create_test! {
+        deps_with_crate_renamed,
+        r#"
+            [dependencies]
+            cool = { package = "my_crate", version = "0.1" }
+        "#,
+        "",
+        Ok(Some(FoundCrate::Name(name))) if name == "cool"
+    }
+
+    create_test! {
+        deps_with_crate_renamed_second,
+        r#"
+            [dependencies.cool]
+            package = "my_crate"
+            version = "0.1"
+        "#,
+        "",
+        Ok(Some(FoundCrate::Name(name))) if name == "cool"
+    }
+
+    create_test! {
+        deps_empty,
+        r#"
+            [dependencies]
+        "#,
+        "",
+        Ok(None)
+    }
+
+    create_test! {
+        crate_not_found,
+        r#"
+            [dependencies]
+            serde = "1.0"
+        "#,
+        "",
+        Ok(None)
+    }
+
+    create_test! {
+        target_dependency,
+        r#"
+            [target.'cfg(target_os="android")'.dependencies]
+            my_crate = "0.1"
+        "#,
+        "",
+        Ok(Some(FoundCrate::Name(name))) if name == "my_crate"
+    }
+
+    create_test! {
+        target_dependency2,
+        r#"
+            [target.x86_64-pc-windows-gnu.dependencies]
+            my_crate = "0.1"
+        "#,
+        "",
+        Ok(Some(FoundCrate::Name(name))) if name == "my_crate"
+    }
+
+    create_test! {
+        own_crate,
+        r#"
+            [package]
+            name = "my_crate"
+        "#,
+        "",
+        Ok(Some(FoundCrate::Itself))
+    }
+
+    create_test! {
+        own_crate_and_in_deps,
+        r#"
+            [package]
+            name = "my_crate"
+
+            [dev-dependencies]
+            my_crate = "0.1"
+        "#,
+        "",
+        Ok(Some(FoundCrate::Itself))
+    }
+
+    create_test! {
+        multiple_times,
+        r#"
+            [dependencies]
+            my_crate = { version = "0.5" }
+            my-crate-old = { package = "my_crate", version = "0.1" }
+        "#,
+        "",
+        Ok(Some(FoundCrate::Name(name))) if name == "my_crate_old"
+    }
+
+    create_test!
{ + workspace_deps, + r#" + [dependencies] + my_crate_cool = { workspace = true } + "#, + r#" + [workspace.dependencies] + my_crate_cool = { package = "my_crate" } + "#, + Ok(Some(FoundCrate::Name(name))) if name == "my_crate_cool" + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/tests/workspace_deps.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/tests/workspace_deps.rs new file mode 100644 index 000000000000..bd59a11a08f8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/proc-macro-crate-3.2.0/tests/workspace_deps.rs @@ -0,0 +1,17 @@ +use std::{path::PathBuf, process::Command}; + +#[test] +fn workspace_deps_working() { + let manifest_dir = + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/workspace_deps/Cargo.toml"); + + assert!(Command::new("cargo") + .arg("build") + .arg("--all") + .arg(format!("--manifest-path={}", manifest_dir.display())) + .spawn() + .unwrap() + .wait() + .unwrap() + .success()); +} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.cargo_vcs_info.json deleted file mode 100644 index 1561cb4ee257..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.cargo_vcs_info.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "git": { - "sha1": "e231741c47af1beda78d53aee29500cccb8266cd" - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.gitignore b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.gitignore deleted file mode 100644 index 5e81b667281d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/target -**/*.rs.bk -Cargo.lock -.fuse_hidden* diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.gitlab-ci.yml b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.gitlab-ci.yml deleted file mode 100644 index d96920c8589e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.gitlab-ci.yml +++ /dev/null @@ -1,53 +0,0 @@ -stages: - - test - - -.setup_template: &setup_template - stage: test - image: debian:stable-slim - before_script: - - export CARGO_HOME="$CI_PROJECT_DIR/.cargo" - - export PATH="$PATH:$CARGO_HOME/bin" - - export RUST_BACKTRACE=full - - apt-get update > /dev/null - - apt-get install -y curl build-essential > /dev/null - - curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $RUST_VERSION - - rustup --version - - rustc --version - - cargo --version - -.test_all_template: &test_all_template - <<: *setup_template - script: - - cargo test --all - - -test-stable: - <<: *test_all_template - variables: - RUST_VERSION: stable - -test-beta: - <<: *test_all_template - variables: - RUST_VERSION: beta - -test-nightly: - <<: *test_all_template - variables: - RUST_VERSION: nightly - - -test-1.31.0: - <<: *setup_template - script: - - cargo test --tests # skip doctests - variables: - RUST_VERSION: 1.31.0 - -test-fmt: - <<: *setup_template - script: - - cargo fmt --all -- --check - variables: - RUST_VERSION: stable diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.travis.yml b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.travis.yml deleted file mode 100644 index 362003fcd8d4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ 
-language: rust -rust: - - stable - - beta - - nightly -script: - - cargo test --all -matrix: - include: - - rust: 1.31.0 - script: cargo test --tests # skip doctests - allow_failures: - - rust: nightly - fast_finish: true - - -notifications: - email: - on_success: never diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/CHANGELOG.md deleted file mode 100644 index 3c422f1c4523..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/CHANGELOG.md +++ /dev/null @@ -1,162 +0,0 @@ -# v1.0.4 (2020-7-31) - -* `SpanRange` facility is now public. -* Docs have been improved. -* Introduced the `syn-error` feature so you can opt-out from the `syn` dependency. - -# v1.0.3 (2020-6-26) - -* Corrected a few typos. -* Fixed the `emit_call_site_warning` macro. - -# v1.0.2 (2020-4-9) - -* An obsolete note was removed from documentation. - -# v1.0.1 (2020-4-9) - -* `proc-macro-hack` is now well tested and supported. Not sure about `proc-macro-nested`, - please fill a request if you need it. -* Fixed `emit_call_site_error`. -* Documentation improvements. - -# v1.0.0 (2020-3-25) - -I believe the API can be considered stable because it's been a few months without -breaking changes, and I also don't think this crate will receive much further evolution. -It's perfect, admit it. - -Hence, meet the new, stable release! - -### Improvements - -* Supported nested `#[proc_macro_error]` attributes. Well, you aren't supposed to do that, - but I caught myself doing it by accident on one occasion and the behavior was... surprising. - Better to handle this smooth. - -# v0.4.12 (2020-3-23) - -* Error message on macros' misuse is now a bit more understandable. - -# v0.4.11 (2020-3-02) - -* `build.rs` no longer fails when `rustc` date could not be determined, - (thanks to [`Fabian Möller`](https://gitlab.com/CreepySkeleton/proc-macro-error/issues/8) - for noticing and to [`Igor Gnatenko`](https://gitlab.com/CreepySkeleton/proc-macro-error/-/merge_requests/25) - for fixing). - -# v0.4.10 (2020-2-29) - -* `proc-macro-error` doesn't depend on syn\[full\] anymore, the compilation - is \~30secs faster. - -# v0.4.9 (2020-2-13) - -* New function: `append_dummy`. - -# v0.4.8 (2020-2-01) - -* Support for children messages - -# v0.4.7 (2020-1-31) - -* Now any type that implements `quote::ToTokens` can be used instead of spans. - This allows for high quality error messages. - -# v0.4.6 (2020-1-31) - -* `From` implementation doesn't lose span info anymore, see - [#6](https://gitlab.com/CreepySkeleton/proc-macro-error/issues/6). - -# v0.4.5 (2020-1-20) -Just a small intermediate release. - -* Fix some bugs. -* Populate license files into subfolders. - -# v0.4.4 (2019-11-13) -* Fix `abort_if_dirty` + warnings bug -* Allow trailing commas in macros - -# v0.4.2 (2019-11-7) -* FINALLY fixed `__pme__suggestions not found` bug - -# v0.4.1 (2019-11-7) YANKED -* Fixed `__pme__suggestions not found` bug -* Documentation improvements, links checked - -# v0.4.0 (2019-11-6) YANKED - -## New features -* "help" messages that can have their own span on nightly, they - inherit parent span on stable. - ```rust - let cond_help = if condition { Some("some help message") else { None } }; - abort!( - span, // parent span - "something's wrong, {} wrongs in total", 10; // main message - help = "here's a help for you, {}", "take it"; // unconditional help message - help =? 
cond_help; // conditional help message, must be Option - note = note_span => "don't forget the note, {}", "would you?" // notes can have their own span but it's effective only on nightly - ) - ``` -* Warnings via `emit_warning` and `emit_warning_call_site`. Nightly only, they're ignored on stable. -* Now `proc-macro-error` delegates to `proc_macro::Diagnostic` on nightly. - -## Breaking changes -* `MacroError` is now replaced by `Diagnostic`. Its API resembles `proc_macro::Diagnostic`. -* `Diagnostic` does not implement `From<&str/String>` so `Result::abort_or_exit()` - won't work anymore (nobody used it anyway). -* `macro_error!` macro is replaced with `diagnostic!`. - -## Improvements -* Now `proc-macro-error` renders notes exactly just like rustc does. -* We don't parse a body of a function annotated with `#[proc_macro_error]` anymore, - only looking at the signature. This should somewhat decrease expansion time for large functions. - -# v0.3.3 (2019-10-16) -* Now you can use any word instead of "help", undocumented. - -# v0.3.2 (2019-10-16) -* Introduced support for "help" messages, undocumented. - -# v0.3.0 (2019-10-8) - -## The crate has been completely rewritten from scratch! - -## Changes (most are breaking): -* Renamed macros: - * `span_error` => `abort` - * `call_site_error` => `abort_call_site` -* `filter_macro_errors` was replaced by `#[proc_macro_error]` attribute. -* `set_dummy` now takes `TokenStream` instead of `Option` -* Support for multiple errors via `emit_error` and `emit_call_site_error` -* New `macro_error` macro for building errors in format=like style. -* `MacroError` API had been reconsidered. It also now implements `quote::ToTokens`. - -# v0.2.6 (2019-09-02) -* Introduce support for dummy implementations via `dummy::set_dummy` -* `multi::*` is now deprecated, will be completely rewritten in v0.3 - -# v0.2.0 (2019-08-15) - -## Breaking changes -* `trigger_error` replaced with `MacroError::trigger` and `filter_macro_error_panics` - is hidden from docs. - This is not quite a breaking change since users weren't supposed to use these functions directly anyway. -* All dependencies are updated to `v1.*`. - -## New features -* Ability to stack multiple errors via `multi::MultiMacroErrors` and emit them at once. - -## Improvements -* Now `MacroError` implements `std::fmt::Display` instead of `std::string::ToString`. -* `MacroError::span` inherent method. -* `From for proc_macro/proc_macro2::TokenStream` implementations. -* `AsRef/AsMut for MacroError` implementations. - -# v0.1.x (2019-07-XX) - -## New features -* An easy way to report errors inside within a proc-macro via `span_error`, - `call_site_error` and `filter_macro_errors`. diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/Cargo.toml deleted file mode 100644 index 869585ffc247..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/Cargo.toml +++ /dev/null @@ -1,56 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. 
If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "proc-macro-error" -version = "1.0.4" -authors = ["CreepySkeleton "] -build = "build.rs" -description = "Almost drop-in replacement to panics in proc-macros" -readme = "README.md" -keywords = ["proc-macro", "error", "errors"] -categories = ["development-tools::procedural-macro-helpers"] -license = "MIT OR Apache-2.0" -repository = "https://gitlab.com/CreepySkeleton/proc-macro-error" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] -[dependencies.proc-macro-error-attr] -version = "=1.0.4" - -[dependencies.proc-macro2] -version = "1" - -[dependencies.quote] -version = "1" - -[dependencies.syn] -version = "1" -optional = true -default-features = false -[dev-dependencies.serde_derive] -version = "=1.0.107" - -[dev-dependencies.toml] -version = "=0.5.2" - -[dev-dependencies.trybuild] -version = "1.0.19" -features = ["diff"] -[build-dependencies.version_check] -version = "0.9" - -[features] -default = ["syn-error"] -syn-error = ["syn"] -[badges.maintenance] -status = "passively-maintained" diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/Cargo.toml.orig deleted file mode 100644 index 5ad358dcdf3d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/Cargo.toml.orig +++ /dev/null @@ -1,44 +0,0 @@ -[package] -name = "proc-macro-error" -version = "1.0.4" -authors = ["CreepySkeleton "] -description = "Almost drop-in replacement to panics in proc-macros" - -repository = "https://gitlab.com/CreepySkeleton/proc-macro-error" -readme = "README.md" -keywords = ["proc-macro", "error", "errors"] -categories = ["development-tools::procedural-macro-helpers"] -license = "MIT OR Apache-2.0" - -edition = "2018" -build = "build.rs" - -[badges] -maintenance = { status = "passively-maintained" } - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -quote = "1" -proc-macro2 = "1" -proc-macro-error-attr = { path = "./proc-macro-error-attr", version = "=1.0.4"} - -[dependencies.syn] -version = "1" -optional = true -default-features = false - -[dev-dependencies] -test-crate = { path = "./test-crate" } -proc-macro-hack-test = { path = "./test-crate/proc-macro-hack-test" } -trybuild = { version = "1.0.19", features = ["diff"] } -toml = "=0.5.2" # DO NOT BUMP -serde_derive = "=1.0.107" # DO NOT BUMP - -[build-dependencies] -version_check = "0.9" - -[features] -default = ["syn-error"] -syn-error = ["syn"] diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/LICENSE-APACHE deleted file mode 100644 index cc17374b2529..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2019-2020 CreepySkeleton - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/LICENSE-MIT deleted file mode 100644 index fc73e591d7f6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/LICENSE-MIT +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019-2020 CreepySkeleton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/README.md b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/README.md deleted file mode 100644 index 7fbe07c53a0c..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/README.md +++ /dev/null @@ -1,258 +0,0 @@ -# Makes error reporting in procedural macros nice and easy - -[![travis ci](https://travis-ci.org/CreepySkeleton/proc-macro-error.svg?branch=master)](https://travis-ci.org/CreepySkeleton/proc-macro-error) -[![docs.rs](https://docs.rs/proc-macro-error/badge.svg)](https://docs.rs/proc-macro-error) -[![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/) - -This crate aims to make error reporting in proc-macros simple and easy to use. -Migrate from `panic!`-based errors for as little effort as possible! - -Also, you can explicitly [append a dummy token stream][crate::dummy] to your errors. - -To achieve his, this crate serves as a tiny shim around `proc_macro::Diagnostic` and -`compile_error!`. It detects the most preferable way to emit errors based on compiler's version. -When the underlying diagnostic type is finally stabilized, this crate will be simply -delegating to it, requiring no changes in your code! - -So you can just use this crate and have *both* some of `proc_macro::Diagnostic` functionality -available on stable ahead of time and your error-reporting code future-proof. - -```toml -[dependencies] -proc-macro-error = "1.0" -``` - -*Supports rustc 1.31 and up* - -[Documentation and guide][guide] - -## Quick example - -Code: - -```rust -#[proc_macro] -#[proc_macro_error] -pub fn make_fn(input: TokenStream) -> TokenStream { - let mut input = TokenStream2::from(input).into_iter(); - let name = input.next().unwrap(); - if let Some(second) = input.next() { - abort! 
{ second, - "I don't like this part!"; - note = "I see what you did there..."; - help = "I need only one part, you know?"; - } - } - - quote!( fn #name() {} ).into() -} -``` - -This is how the error is rendered in a terminal: - -

-[screenshot: the error as rendered in a terminal]
-
-And this is what your users will see in their IDE:
-
-[screenshot: the error as shown in the IDE]
- -## Examples - -### Panic-like usage - -```rust -use proc_macro_error::{ - proc_macro_error, - abort, - abort_call_site, - ResultExt, - OptionExt, -}; -use proc_macro::TokenStream; -use syn::{DeriveInput, parse_macro_input}; -use quote::quote; - -// This is your main entry point -#[proc_macro] -// This attribute *MUST* be placed on top of the #[proc_macro] function -#[proc_macro_error] -pub fn make_answer(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - - if let Err(err) = some_logic(&input) { - // we've got a span to blame, let's use it - // This immediately aborts the proc-macro and shows the error - // - // You can use `proc_macro::Span`, `proc_macro2::Span`, and - // anything that implements `quote::ToTokens` (almost every type from - // `syn` and `proc_macro2`) - abort!(err, "You made an error, go fix it: {}", err.msg); - } - - // `Result` has some handy shortcuts if your error type implements - // `Into`. `Option` has one unconditionally. - more_logic(&input).expect_or_abort("What a careless user, behave!"); - - if !more_logic_for_logic_god(&input) { - // We don't have an exact location this time, - // so just highlight the proc-macro invocation itself - abort_call_site!( - "Bad, bad user! Now go stand in the corner and think about what you did!"); - } - - // Now all the processing is done, return `proc_macro::TokenStream` - quote!(/* stuff */).into() -} -``` - -### `proc_macro::Diagnostic`-like usage - -```rust -use proc_macro_error::*; -use proc_macro::TokenStream; -use syn::{spanned::Spanned, DeriveInput, ItemStruct, Fields, Attribute , parse_macro_input}; -use quote::quote; - -fn process_attrs(attrs: &[Attribute]) -> Vec { - attrs - .iter() - .filter_map(|attr| match process_attr(attr) { - Ok(res) => Some(res), - Err(msg) => { - emit_error!(attr, "Invalid attribute: {}", msg); - None - } - }) - .collect() -} - -fn process_fields(_attrs: &Fields) -> Vec { - // processing fields in pretty much the same way as attributes - unimplemented!() -} - -#[proc_macro] -#[proc_macro_error] -pub fn make_answer(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as ItemStruct); - let attrs = process_attrs(&input.attrs); - - // abort right now if some errors were encountered - // at the attributes processing stage - abort_if_dirty(); - - let fields = process_fields(&input.fields); - - // no need to think about emitted errors - // #[proc_macro_error] will handle them for you - // - // just return a TokenStream as you normally would - quote!(/* stuff */).into() -} -``` - -## Real world examples - -* [`structopt-derive`](https://github.com/TeXitoi/structopt/tree/master/structopt-derive) - (abort-like usage) -* [`auto-impl`](https://github.com/auto-impl-rs/auto_impl/) (emit-like usage) - -## Limitations - -- Warnings are emitted only on nightly, they are ignored on stable. -- "help" suggestions can't have their own span info on stable, - (essentially inheriting the parent span). -- If your macro happens to trigger a panic, no errors will be displayed. This is not a - technical limitation but rather intentional design. `panic` is not for error reporting. - -## MSRV policy - -`proc_macro_error` will always be compatible with proc-macro Holy Trinity: -`proc_macro2`, `syn`, `quote` crates. In other words, if the Trinity is available -to you - `proc_macro_error` is available too. 
- -> **Important!** -> -> If you want to use `#[proc_macro_error]` with `synstructure`, you're going -> to have to put the attribute inside the `decl_derive!` invocation. Unfortunately, -> due to some bug in pre-1.34 rustc, putting proc-macro attributes inside macro -> invocations doesn't work, so your MSRV is effectively 1.34. - -## Motivation - -Error handling in proc-macros sucks. There's not much of a choice today: -you either "bubble up" the error up to the top-level of the macro and convert it to -a [`compile_error!`][compl_err] invocation or just use a good old panic. Both these ways suck: - -- Former sucks because it's quite redundant to unroll a proper error handling - just for critical errors that will crash the macro anyway; so people mostly - choose not to bother with it at all and use panic. Simple `.expect` is too tempting. - - Also, if you do decide to implement this `Result`-based architecture in your macro - you're going to have to rewrite it entirely once [`proc_macro::Diagnostic`][] is finally - stable. Not cool. - -- Later sucks because there's no way to carry out the span info via `panic!`. - `rustc` will highlight the invocation itself but not some specific token inside it. - - Furthermore, panics aren't for error-reporting at all; panics are for bug-detecting - (like unwrapping on `None` or out-of-range indexing) or for early development stages - when you need a prototype ASAP so error handling can wait. Mixing these usages only - messes things up. - -- There is [`proc_macro::Diagnostic`][] which is awesome but it has been experimental - for more than a year and is unlikely to be stabilized any time soon. - - This crate's API is intentionally designed to be compatible with `proc_macro::Diagnostic` - and delegates to it whenever possible. Once `Diagnostics` is stable this crate - will **always** delegate to it, no code changes will be required on user side. - -That said, we need a solution, but this solution must meet these conditions: - -- It must be better than `panic!`. The main point: it must offer a way to carry the span information - over to user. -- It must take as little effort as possible to migrate from `panic!`. Ideally, a new - macro with similar semantics plus ability to carry out span info. -- It must maintain compatibility with [`proc_macro::Diagnostic`][] . -- **It must be usable on stable**. - -This crate aims to provide such a mechanism. All you have to do is annotate your top-level -`#[proc_macro]` function with `#[proc_macro_error]` attribute and change panics to -[`abort!`]/[`abort_call_site!`] where appropriate, see [the Guide][guide]. - -## Disclaimer -Please note that **this crate is not intended to be used in any way other -than error reporting in procedural macros**, use `Result` and `?` (possibly along with one of the -many helpers out there) for anything else. - -
- -#### License - - -Licensed under either of Apache License, Version -2.0 or MIT license at your option. - - -
- - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. - - - -[compl_err]: https://doc.rust-lang.org/std/macro.compile_error.html -[`proc_macro::Diagnostic`]: https://doc.rust-lang.org/proc_macro/struct.Diagnostic.html - -[crate::dummy]: https://docs.rs/proc-macro-error/1/proc_macro_error/dummy/index.html -[crate::multi]: https://docs.rs/proc-macro-error/1/proc_macro_error/multi/index.html - -[`abort_call_site!`]: https://docs.rs/proc-macro-error/1/proc_macro_error/macro.abort_call_site.html -[`abort!`]: https://docs.rs/proc-macro-error/1/proc_macro_error/macro.abort.html -[guide]: https://docs.rs/proc-macro-error diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/build.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/build.rs deleted file mode 100644 index 3c1196f26998..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/build.rs +++ /dev/null @@ -1,11 +0,0 @@ -fn main() { - if !version_check::is_feature_flaggable().unwrap_or(false) { - println!("cargo:rustc-cfg=use_fallback"); - } - - if version_check::is_max_version("1.38.0").unwrap_or(false) - || !version_check::Channel::read().unwrap().is_stable() - { - println!("cargo:rustc-cfg=skip_ui_tests"); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/diagnostic.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/diagnostic.rs deleted file mode 100644 index 983e6174fe58..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/diagnostic.rs +++ /dev/null @@ -1,349 +0,0 @@ -use crate::{abort_now, check_correctness, sealed::Sealed, SpanRange}; -use proc_macro2::Span; -use proc_macro2::TokenStream; - -use quote::{quote_spanned, ToTokens}; - -/// Represents a diagnostic level -/// -/// # Warnings -/// -/// Warnings are ignored on stable/beta -#[derive(Debug, PartialEq)] -pub enum Level { - Error, - Warning, - #[doc(hidden)] - NonExhaustive, -} - -/// Represents a single diagnostic message -#[derive(Debug)] -pub struct Diagnostic { - pub(crate) level: Level, - pub(crate) span_range: SpanRange, - pub(crate) msg: String, - pub(crate) suggestions: Vec<(SuggestionKind, String, Option)>, - pub(crate) children: Vec<(SpanRange, String)>, -} - -/// A collection of methods that do not exist in `proc_macro::Diagnostic` -/// but still useful to have around. -/// -/// This trait is sealed and cannot be implemented outside of `proc_macro_error`. -pub trait DiagnosticExt: Sealed { - /// Create a new diagnostic message that points to the `span_range`. - /// - /// This function is the same as `Diagnostic::spanned` but produces considerably - /// better error messages for multi-token spans on stable. - fn spanned_range(span_range: SpanRange, level: Level, message: String) -> Self; - - /// Add another error message to self such that it will be emitted right after - /// the main message. - /// - /// This function is the same as `Diagnostic::span_error` but produces considerably - /// better error messages for multi-token spans on stable. - fn span_range_error(self, span_range: SpanRange, msg: String) -> Self; - - /// Attach a "help" note to your main message, the note will have it's own span on nightly. 
- /// - /// This function is the same as `Diagnostic::span_help` but produces considerably - /// better error messages for multi-token spans on stable. - /// - /// # Span - /// - /// The span is ignored on stable, the note effectively inherits its parent's (main message) span - fn span_range_help(self, span_range: SpanRange, msg: String) -> Self; - - /// Attach a note to your main message, the note will have it's own span on nightly. - /// - /// This function is the same as `Diagnostic::span_note` but produces considerably - /// better error messages for multi-token spans on stable. - /// - /// # Span - /// - /// The span is ignored on stable, the note effectively inherits its parent's (main message) span - fn span_range_note(self, span_range: SpanRange, msg: String) -> Self; -} - -impl DiagnosticExt for Diagnostic { - fn spanned_range(span_range: SpanRange, level: Level, message: String) -> Self { - Diagnostic { - level, - span_range, - msg: message, - suggestions: vec![], - children: vec![], - } - } - - fn span_range_error(mut self, span_range: SpanRange, msg: String) -> Self { - self.children.push((span_range, msg)); - self - } - - fn span_range_help(mut self, span_range: SpanRange, msg: String) -> Self { - self.suggestions - .push((SuggestionKind::Help, msg, Some(span_range))); - self - } - - fn span_range_note(mut self, span_range: SpanRange, msg: String) -> Self { - self.suggestions - .push((SuggestionKind::Note, msg, Some(span_range))); - self - } -} - -impl Diagnostic { - /// Create a new diagnostic message that points to `Span::call_site()` - pub fn new(level: Level, message: String) -> Self { - Diagnostic::spanned(Span::call_site(), level, message) - } - - /// Create a new diagnostic message that points to the `span` - pub fn spanned(span: Span, level: Level, message: String) -> Self { - Diagnostic::spanned_range( - SpanRange { - first: span, - last: span, - }, - level, - message, - ) - } - - /// Add another error message to self such that it will be emitted right after - /// the main message. - pub fn span_error(self, span: Span, msg: String) -> Self { - self.span_range_error( - SpanRange { - first: span, - last: span, - }, - msg, - ) - } - - /// Attach a "help" note to your main message, the note will have it's own span on nightly. - /// - /// # Span - /// - /// The span is ignored on stable, the note effectively inherits its parent's (main message) span - pub fn span_help(self, span: Span, msg: String) -> Self { - self.span_range_help( - SpanRange { - first: span, - last: span, - }, - msg, - ) - } - - /// Attach a "help" note to your main message. - pub fn help(mut self, msg: String) -> Self { - self.suggestions.push((SuggestionKind::Help, msg, None)); - self - } - - /// Attach a note to your main message, the note will have it's own span on nightly. - /// - /// # Span - /// - /// The span is ignored on stable, the note effectively inherits its parent's (main message) span - pub fn span_note(self, span: Span, msg: String) -> Self { - self.span_range_note( - SpanRange { - first: span, - last: span, - }, - msg, - ) - } - - /// Attach a note to your main message - pub fn note(mut self, msg: String) -> Self { - self.suggestions.push((SuggestionKind::Note, msg, None)); - self - } - - /// The message of main warning/error (no notes attached) - pub fn message(&self) -> &str { - &self.msg - } - - /// Abort the proc-macro's execution and display the diagnostic. - /// - /// # Warnings - /// - /// Warnings are not emitted on stable and beta, but this function will abort anyway. 
- pub fn abort(self) -> ! { - self.emit(); - abort_now() - } - - /// Display the diagnostic while not aborting macro execution. - /// - /// # Warnings - /// - /// Warnings are ignored on stable/beta - pub fn emit(self) { - check_correctness(); - crate::imp::emit_diagnostic(self); - } -} - -/// **NOT PUBLIC API! NOTHING TO SEE HERE!!!** -#[doc(hidden)] -impl Diagnostic { - pub fn span_suggestion(self, span: Span, suggestion: &str, msg: String) -> Self { - match suggestion { - "help" | "hint" => self.span_help(span, msg), - _ => self.span_note(span, msg), - } - } - - pub fn suggestion(self, suggestion: &str, msg: String) -> Self { - match suggestion { - "help" | "hint" => self.help(msg), - _ => self.note(msg), - } - } -} - -impl ToTokens for Diagnostic { - fn to_tokens(&self, ts: &mut TokenStream) { - use std::borrow::Cow; - - fn ensure_lf(buf: &mut String, s: &str) { - if s.ends_with('\n') { - buf.push_str(s); - } else { - buf.push_str(s); - buf.push('\n'); - } - } - - fn diag_to_tokens( - span_range: SpanRange, - level: &Level, - msg: &str, - suggestions: &[(SuggestionKind, String, Option)], - ) -> TokenStream { - if *level == Level::Warning { - return TokenStream::new(); - } - - let message = if suggestions.is_empty() { - Cow::Borrowed(msg) - } else { - let mut message = String::new(); - ensure_lf(&mut message, msg); - message.push('\n'); - - for (kind, note, _span) in suggestions { - message.push_str(" = "); - message.push_str(kind.name()); - message.push_str(": "); - ensure_lf(&mut message, note); - } - message.push('\n'); - - Cow::Owned(message) - }; - - let mut msg = proc_macro2::Literal::string(&message); - msg.set_span(span_range.last); - let group = quote_spanned!(span_range.last=> { #msg } ); - quote_spanned!(span_range.first=> compile_error!#group) - } - - ts.extend(diag_to_tokens( - self.span_range, - &self.level, - &self.msg, - &self.suggestions, - )); - ts.extend( - self.children - .iter() - .map(|(span_range, msg)| diag_to_tokens(*span_range, &Level::Error, &msg, &[])), - ); - } -} - -#[derive(Debug)] -pub(crate) enum SuggestionKind { - Help, - Note, -} - -impl SuggestionKind { - fn name(&self) -> &'static str { - match self { - SuggestionKind::Note => "note", - SuggestionKind::Help => "help", - } - } -} - -#[cfg(feature = "syn-error")] -impl From for Diagnostic { - fn from(err: syn::Error) -> Self { - use proc_macro2::{Delimiter, TokenTree}; - - fn gut_error(ts: &mut impl Iterator) -> Option<(SpanRange, String)> { - let first = match ts.next() { - // compile_error - None => return None, - Some(tt) => tt.span(), - }; - ts.next().unwrap(); // ! - - let lit = match ts.next().unwrap() { - TokenTree::Group(group) => { - // Currently `syn` builds `compile_error!` invocations - // exclusively in `ident{"..."}` (braced) form which is not - // followed by `;` (semicolon). - // - // But if it changes to `ident("...");` (parenthesized) - // or `ident["..."];` (bracketed) form, - // we will need to skip the `;` as well. - // Highly unlikely, but better safe than sorry. 
- - if group.delimiter() == Delimiter::Parenthesis - || group.delimiter() == Delimiter::Bracket - { - ts.next().unwrap(); // ; - } - - match group.stream().into_iter().next().unwrap() { - TokenTree::Literal(lit) => lit, - _ => unreachable!(), - } - } - _ => unreachable!(), - }; - - let last = lit.span(); - let mut msg = lit.to_string(); - - // "abc" => abc - msg.pop(); - msg.remove(0); - - Some((SpanRange { first, last }, msg)) - } - - let mut ts = err.to_compile_error().into_iter(); - - let (span_range, msg) = gut_error(&mut ts).unwrap(); - let mut res = Diagnostic::spanned_range(span_range, Level::Error, msg); - - while let Some((span_range, msg)) = gut_error(&mut ts) { - res = res.span_range_error(span_range, msg); - } - - res - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/dummy.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/dummy.rs deleted file mode 100644 index 571a595aa9cc..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/dummy.rs +++ /dev/null @@ -1,150 +0,0 @@ -//! Facility to emit dummy implementations (or whatever) in case -//! an error happen. -//! -//! `compile_error!` does not abort a compilation right away. This means -//! `rustc` doesn't just show you the error and abort, it carries on the -//! compilation process looking for other errors to report. -//! -//! Let's consider an example: -//! -//! ```rust,ignore -//! use proc_macro::TokenStream; -//! use proc_macro_error::*; -//! -//! trait MyTrait { -//! fn do_thing(); -//! } -//! -//! // this proc macro is supposed to generate MyTrait impl -//! #[proc_macro_derive(MyTrait)] -//! #[proc_macro_error] -//! fn example(input: TokenStream) -> TokenStream { -//! // somewhere deep inside -//! abort!(span, "something's wrong"); -//! -//! // this implementation will be generated if no error happened -//! quote! { -//! impl MyTrait for #name { -//! fn do_thing() {/* whatever */} -//! } -//! } -//! } -//! -//! // ================ -//! // in main.rs -//! -//! // this derive triggers an error -//! #[derive(MyTrait)] // first BOOM! -//! struct Foo; -//! -//! fn main() { -//! Foo::do_thing(); // second BOOM! -//! } -//! ``` -//! -//! The problem is: the generated token stream contains only `compile_error!` -//! invocation, the impl was not generated. That means user will see two compilation -//! errors: -//! -//! ```text -//! error: something's wrong -//! --> $DIR/probe.rs:9:10 -//! | -//! 9 |#[proc_macro_derive(MyTrait)] -//! | ^^^^^^^ -//! -//! error[E0599]: no function or associated item named `do_thing` found for type `Foo` in the current scope -//! --> src\main.rs:3:10 -//! | -//! 1 | struct Foo; -//! | ----------- function or associated item `do_thing` not found for this -//! 2 | fn main() { -//! 3 | Foo::do_thing(); // second BOOM! -//! | ^^^^^^^^ function or associated item not found in `Foo` -//! ``` -//! -//! But the second error is meaningless! We definitely need to fix this. -//! -//! Most used approach in cases like this is "dummy implementation" - -//! omit `impl MyTrait for #name` and fill functions bodies with `unimplemented!()`. -//! -//! This is how you do it: -//! -//! ```rust,ignore -//! use proc_macro::TokenStream; -//! use proc_macro_error::*; -//! -//! trait MyTrait { -//! fn do_thing(); -//! } -//! -//! // this proc macro is supposed to generate MyTrait impl -//! #[proc_macro_derive(MyTrait)] -//! #[proc_macro_error] -//! fn example(input: TokenStream) -> TokenStream { -//! 
// first of all - we set a dummy impl which will be appended to -//! // `compile_error!` invocations in case a trigger does happen -//! set_dummy(quote! { -//! impl MyTrait for #name { -//! fn do_thing() { unimplemented!() } -//! } -//! }); -//! -//! // somewhere deep inside -//! abort!(span, "something's wrong"); -//! -//! // this implementation will be generated if no error happened -//! quote! { -//! impl MyTrait for #name { -//! fn do_thing() {/* whatever */} -//! } -//! } -//! } -//! -//! // ================ -//! // in main.rs -//! -//! // this derive triggers an error -//! #[derive(MyTrait)] // first BOOM! -//! struct Foo; -//! -//! fn main() { -//! Foo::do_thing(); // no more errors! -//! } -//! ``` - -use proc_macro2::TokenStream; -use std::cell::RefCell; - -use crate::check_correctness; - -thread_local! { - static DUMMY_IMPL: RefCell> = RefCell::new(None); -} - -/// Sets dummy token stream which will be appended to `compile_error!(msg);...` -/// invocations in case you'll emit any errors. -/// -/// See [guide](../index.html#guide). -pub fn set_dummy(dummy: TokenStream) -> Option { - check_correctness(); - DUMMY_IMPL.with(|old_dummy| old_dummy.replace(Some(dummy))) -} - -/// Same as [`set_dummy`] but, instead of resetting, appends tokens to the -/// existing dummy (if any). Behaves as `set_dummy` if no dummy is present. -pub fn append_dummy(dummy: TokenStream) { - check_correctness(); - DUMMY_IMPL.with(|old_dummy| { - let mut cell = old_dummy.borrow_mut(); - if let Some(ts) = cell.as_mut() { - ts.extend(dummy); - } else { - *cell = Some(dummy); - } - }); -} - -pub(crate) fn cleanup() -> Option { - DUMMY_IMPL.with(|old_dummy| old_dummy.replace(None)) -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/imp/delegate.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/imp/delegate.rs deleted file mode 100644 index 07def2b98e5f..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/imp/delegate.rs +++ /dev/null @@ -1,69 +0,0 @@ -//! This implementation uses [`proc_macro::Diagnostic`], nightly only. - -use std::cell::Cell; - -use proc_macro::{Diagnostic as PDiag, Level as PLevel}; - -use crate::{ - abort_now, check_correctness, - diagnostic::{Diagnostic, Level, SuggestionKind}, -}; - -pub fn abort_if_dirty() { - check_correctness(); - if IS_DIRTY.with(|c| c.get()) { - abort_now() - } -} - -pub(crate) fn cleanup() -> Vec { - IS_DIRTY.with(|c| c.set(false)); - vec![] -} - -pub(crate) fn emit_diagnostic(diag: Diagnostic) { - let Diagnostic { - level, - span_range, - msg, - suggestions, - children, - } = diag; - - let span = span_range.collapse().unwrap(); - - let level = match level { - Level::Warning => PLevel::Warning, - Level::Error => { - IS_DIRTY.with(|c| c.set(true)); - PLevel::Error - } - _ => unreachable!(), - }; - - let mut res = PDiag::spanned(span, level, msg); - - for (kind, msg, span) in suggestions { - res = match (kind, span) { - (SuggestionKind::Note, Some(span_range)) => { - res.span_note(span_range.collapse().unwrap(), msg) - } - (SuggestionKind::Help, Some(span_range)) => { - res.span_help(span_range.collapse().unwrap(), msg) - } - (SuggestionKind::Note, None) => res.note(msg), - (SuggestionKind::Help, None) => res.help(msg), - } - } - - for (span_range, msg) in children { - let span = span_range.collapse().unwrap(); - res = res.span_error(span, msg); - } - - res.emit() -} - -thread_local! 
{ - static IS_DIRTY: Cell = Cell::new(false); -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/imp/fallback.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/imp/fallback.rs deleted file mode 100644 index ad1f730bfcfb..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/imp/fallback.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! This implementation uses self-written stable facilities. - -use crate::{ - abort_now, check_correctness, - diagnostic::{Diagnostic, Level}, -}; -use std::cell::RefCell; - -pub fn abort_if_dirty() { - check_correctness(); - ERR_STORAGE.with(|storage| { - if !storage.borrow().is_empty() { - abort_now() - } - }); -} - -pub(crate) fn cleanup() -> Vec { - ERR_STORAGE.with(|storage| storage.replace(Vec::new())) -} - -pub(crate) fn emit_diagnostic(diag: Diagnostic) { - if diag.level == Level::Error { - ERR_STORAGE.with(|storage| storage.borrow_mut().push(diag)); - } -} - -thread_local! { - static ERR_STORAGE: RefCell> = RefCell::new(Vec::new()); -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/lib.rs deleted file mode 100644 index fb867fdc03a1..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/lib.rs +++ /dev/null @@ -1,560 +0,0 @@ -//! # proc-macro-error -//! -//! This crate aims to make error reporting in proc-macros simple and easy to use. -//! Migrate from `panic!`-based errors for as little effort as possible! -//! -//! (Also, you can explicitly [append a dummy token stream](dummy/index.html) to your errors). -//! -//! To achieve his, this crate serves as a tiny shim around `proc_macro::Diagnostic` and -//! `compile_error!`. It detects the best way of emitting available based on compiler's version. -//! When the underlying diagnostic type is finally stabilized, this crate will simply be -//! delegating to it requiring no changes in your code! -//! -//! So you can just use this crate and have *both* some of `proc_macro::Diagnostic` functionality -//! available on stable ahead of time *and* your error-reporting code future-proof. -//! -//! ## Cargo features -//! -//! This crate provides *enabled by default* `syn-error` feature that gates -//! `impl From for Diagnostic` conversion. If you don't use `syn` and want -//! to cut off some of compilation time, you can disable it via -//! -//! ```toml -//! [dependencies] -//! proc-macro-error = { version = "1", default-features = false } -//! ``` -//! -//! ***Please note that disabling this feature makes sense only if you don't depend on `syn` -//! directly or indirectly, and you very likely do.** -//! -//! ## Real world examples -//! -//! * [`structopt-derive`](https://github.com/TeXitoi/structopt/tree/master/structopt-derive) -//! (abort-like usage) -//! * [`auto-impl`](https://github.com/auto-impl-rs/auto_impl/) (emit-like usage) -//! -//! ## Limitations -//! -//! - Warnings are emitted only on nightly, they are ignored on stable. -//! - "help" suggestions can't have their own span info on stable, -//! (essentially inheriting the parent span). -//! - If a panic occurs somewhere in your macro no errors will be displayed. This is not a -//! technical limitation but rather intentional design. `panic` is not for error reporting. -//! -//! ### `#[proc_macro_error]` attribute -//! -//! **This attribute MUST be present on the top level of your macro** (the function -//! 
annotated with any of `#[proc_macro]`, `#[proc_macro_derive]`, `#[proc_macro_attribute]`). -//! -//! This attribute performs the setup and cleanup necessary to make things work. -//! -//! In most cases you'll need the simple `#[proc_macro_error]` form without any -//! additional settings. Feel free to [skip the "Syntax" section](#macros). -//! -//! #### Syntax -//! -//! `#[proc_macro_error]` or `#[proc_macro_error(settings...)]`, where `settings...` -//! is a comma-separated list of: -//! -//! - `proc_macro_hack`: -//! -//! In order to correctly cooperate with `#[proc_macro_hack]`, `#[proc_macro_error]` -//! attribute must be placed *before* (above) it, like this: -//! -//! ```no_run -//! # use proc_macro2::TokenStream; -//! # const IGNORE: &str = " -//! #[proc_macro_error] -//! #[proc_macro_hack] -//! #[proc_macro] -//! # "; -//! fn my_macro(input: TokenStream) -> TokenStream { -//! unimplemented!() -//! } -//! ``` -//! -//! If, for some reason, you can't place it like that you can use -//! `#[proc_macro_error(proc_macro_hack)]` instead. -//! -//! # Note -//! -//! If `proc-macro-hack` was detected (by any means) `allow_not_macro` -//! and `assert_unwind_safe` will be applied automatically. -//! -//! - `allow_not_macro`: -//! -//! By default, the attribute checks that it's applied to a proc-macro. -//! If none of `#[proc_macro]`, `#[proc_macro_derive]` nor `#[proc_macro_attribute]` are -//! present it will panic. It's the intention - this crate is supposed to be used only with -//! proc-macros. -//! -//! This setting is made to bypass the check, useful in certain circumstances. -//! -//! Pay attention: the function this attribute is applied to must return -//! `proc_macro::TokenStream`. -//! -//! This setting is implied if `proc-macro-hack` was detected. -//! -//! - `assert_unwind_safe`: -//! -//! By default, your code must be [unwind safe]. If your code is not unwind safe, -//! but you believe it's correct, you can use this setting to bypass the check. -//! You would need this for code that uses `lazy_static` or `thread_local` with -//! `Cell/RefCell` inside (and the like). -//! -//! This setting is implied if `#[proc_macro_error]` is applied to a function -//! marked as `#[proc_macro]`, `#[proc_macro_derive]` or `#[proc_macro_attribute]`. -//! -//! This setting is also implied if `proc-macro-hack` was detected. -//! -//! ## Macros -//! -//! Most of the time you want to use the macros. Syntax is described in the next section below. -//! -//! You'll need to decide how you want to emit errors: -//! -//! * Emit the error and abort. Very much panic-like usage. Served by [`abort!`] and -//! [`abort_call_site!`]. -//! * Emit the error but do not abort right away, looking for other errors to report. -//! Served by [`emit_error!`] and [`emit_call_site_error!`]. -//! -//! You **can** mix these usages. -//! -//! `abort` and `emit_error` take a "source span" as the first argument. This source -//! will be used to highlight the place the error originates from. It must be one of: -//! -//! * *Something* that implements [`ToTokens`] (most types in `syn` and `proc-macro2` do). -//! This source is the preferable one since it doesn't lose span information on multi-token -//! spans, see [this issue](https://gitlab.com/CreepySkeleton/proc-macro-error/-/issues/6) -//! for details. -//! * [`proc_macro::Span`] -//! * [`proc-macro2::Span`] -//! -//! The rest is your message in format-like style. -//! -//! See [the next section](#syntax-1) for detailed syntax. -//! -//! - [`abort!`]: -//! -//! 
Very much panic-like usage - abort right away and show the error. -//! Expands to [`!`] (never type). -//! -//! - [`abort_call_site!`]: -//! -//! Shortcut for `abort!(Span::call_site(), ...)`. Expands to [`!`] (never type). -//! -//! - [`emit_error!`]: -//! -//! [`proc_macro::Diagnostic`]-like usage - emit the error but keep going, -//! looking for other errors to report. -//! The compilation will fail nonetheless. Expands to [`()`] (unit type). -//! -//! - [`emit_call_site_error!`]: -//! -//! Shortcut for `emit_error!(Span::call_site(), ...)`. Expands to [`()`] (unit type). -//! -//! - [`emit_warning!`]: -//! -//! Like `emit_error!` but emit a warning instead of error. The compilation won't fail -//! because of warnings. -//! Expands to [`()`] (unit type). -//! -//! **Beware**: warnings are nightly only, they are completely ignored on stable. -//! -//! - [`emit_call_site_warning!`]: -//! -//! Shortcut for `emit_warning!(Span::call_site(), ...)`. Expands to [`()`] (unit type). -//! -//! - [`diagnostic`]: -//! -//! Build an instance of `Diagnostic` in format-like style. -//! -//! #### Syntax -//! -//! All the macros have pretty much the same syntax: -//! -//! 1. ```ignore -//! abort!(single_expr) -//! ``` -//! Shortcut for `Diagnostic::from(expr).abort()`. -//! -//! 2. ```ignore -//! abort!(span, message) -//! ``` -//! The first argument is an expression the span info should be taken from. -//! -//! The second argument is the error message, it must implement [`ToString`]. -//! -//! 3. ```ignore -//! abort!(span, format_literal, format_args...) -//! ``` -//! -//! This form is pretty much the same as 2, except `format!(format_literal, format_args...)` -//! will be used to for the message instead of [`ToString`]. -//! -//! That's it. `abort!`, `emit_warning`, `emit_error` share this exact syntax. -//! -//! `abort_call_site!`, `emit_call_site_warning`, `emit_call_site_error` lack 1 form -//! and do not take span in 2'th and 3'th forms. Those are essentially shortcuts for -//! `macro!(Span::call_site(), args...)`. -//! -//! `diagnostic!` requires a [`Level`] instance between `span` and second argument -//! (1'th form is the same). -//! -//! > **Important!** -//! > -//! > If you have some type from `proc_macro` or `syn` to point to, do not call `.span()` -//! > on it but rather use it directly: -//! > ```no_run -//! > # use proc_macro_error::abort; -//! > # let input = proc_macro2::TokenStream::new(); -//! > let ty: syn::Type = syn::parse2(input).unwrap(); -//! > abort!(ty, "BOOM"); -//! > // ^^ <-- avoid .span() -//! > ``` -//! > -//! > `.span()` calls work too, but you may experience regressions in message quality. -//! -//! #### Note attachments -//! -//! 3. Every macro can have "note" attachments (only 2 and 3 form). -//! ```ignore -//! let opt_help = if have_some_info { Some("did you mean `this`?") } else { None }; -//! -//! abort!( -//! span, message; // <--- attachments start with `;` (semicolon) -//! -//! help = "format {} {}", "arg1", "arg2"; // <--- every attachment ends with `;`, -//! // maybe except the last one -//! -//! note = "to_string"; // <--- one arg uses `.to_string()` instead of `format!()` -//! -//! yay = "I see what {} did here", "you"; // <--- "help =" and "hint =" are mapped -//! // to Diagnostic::help, -//! // anything else is Diagnostic::note -//! -//! wow = note_span => "custom span"; // <--- attachments can have their own span -//! // it takes effect only on nightly though -//! -//! hint =? opt_help; // <-- "optional" attachment, get displayed only if `Some` -//! 
// must be single `Option` expression -//! -//! note =? note_span => opt_help // <-- optional attachments can have custom spans too -//! ); -//! ``` -//! - -//! ### Diagnostic type -//! -//! [`Diagnostic`] type is intentionally designed to be API compatible with [`proc_macro::Diagnostic`]. -//! Not all API is implemented, only the part that can be reasonably implemented on stable. -//! -//! -//! [`abort!`]: macro.abort.html -//! [`abort_call_site!`]: macro.abort_call_site.html -//! [`emit_warning!`]: macro.emit_warning.html -//! [`emit_error!`]: macro.emit_error.html -//! [`emit_call_site_warning!`]: macro.emit_call_site_error.html -//! [`emit_call_site_error!`]: macro.emit_call_site_warning.html -//! [`diagnostic!`]: macro.diagnostic.html -//! [`Diagnostic`]: struct.Diagnostic.html -//! -//! [`proc_macro::Span`]: https://doc.rust-lang.org/proc_macro/struct.Span.html -//! [`proc_macro::Diagnostic`]: https://doc.rust-lang.org/proc_macro/struct.Diagnostic.html -//! -//! [unwind safe]: https://doc.rust-lang.org/std/panic/trait.UnwindSafe.html#what-is-unwind-safety -//! [`!`]: https://doc.rust-lang.org/std/primitive.never.html -//! [`()`]: https://doc.rust-lang.org/std/primitive.unit.html -//! [`ToString`]: https://doc.rust-lang.org/std/string/trait.ToString.html -//! -//! [`proc-macro2::Span`]: https://docs.rs/proc-macro2/1.0.10/proc_macro2/struct.Span.html -//! [`ToTokens`]: https://docs.rs/quote/1.0.3/quote/trait.ToTokens.html -//! - -#![cfg_attr(not(use_fallback), feature(proc_macro_diagnostic))] -#![forbid(unsafe_code)] -#![allow(clippy::needless_doctest_main)] - -extern crate proc_macro; - -pub use crate::{ - diagnostic::{Diagnostic, DiagnosticExt, Level}, - dummy::{append_dummy, set_dummy}, -}; -pub use proc_macro_error_attr::proc_macro_error; - -use proc_macro2::Span; -use quote::{quote, ToTokens}; - -use std::cell::Cell; -use std::panic::{catch_unwind, resume_unwind, UnwindSafe}; - -pub mod dummy; - -mod diagnostic; -mod macros; -mod sealed; - -#[cfg(use_fallback)] -#[path = "imp/fallback.rs"] -mod imp; - -#[cfg(not(use_fallback))] -#[path = "imp/delegate.rs"] -mod imp; - -#[derive(Debug, Clone, Copy)] -pub struct SpanRange { - pub first: Span, - pub last: Span, -} - -impl SpanRange { - /// Create a range with the `first` and `last` spans being the same. - pub fn single_span(span: Span) -> Self { - SpanRange { - first: span, - last: span, - } - } - - /// Create a `SpanRange` resolving at call site. - pub fn call_site() -> Self { - SpanRange::single_span(Span::call_site()) - } - - /// Construct span range from a `TokenStream`. This method always preserves all the - /// range. - /// - /// ### Note - /// - /// If the stream is empty, the result is `SpanRange::call_site()`. If the stream - /// consists of only one `TokenTree`, the result is `SpanRange::single_span(tt.span())` - /// that doesn't lose anything. - pub fn from_tokens(ts: &dyn ToTokens) -> Self { - let mut spans = ts.to_token_stream().into_iter().map(|tt| tt.span()); - let first = spans.next().unwrap_or_else(|| Span::call_site()); - let last = spans.last().unwrap_or(first); - - SpanRange { first, last } - } - - /// Join two span ranges. The resulting range will start at `self.first` and end at - /// `other.last`. - pub fn join_range(self, other: SpanRange) -> Self { - SpanRange { - first: self.first, - last: other.last, - } - } - - /// Collapse the range into single span, preserving as much information as possible. 
- pub fn collapse(self) -> Span { - self.first.join(self.last).unwrap_or(self.first) - } -} - -/// This traits expands `Result>` with some handy shortcuts. -pub trait ResultExt { - type Ok; - - /// Behaves like `Result::unwrap`: if self is `Ok` yield the contained value, - /// otherwise abort macro execution via `abort!`. - fn unwrap_or_abort(self) -> Self::Ok; - - /// Behaves like `Result::expect`: if self is `Ok` yield the contained value, - /// otherwise abort macro execution via `abort!`. - /// If it aborts then resulting error message will be preceded with `message`. - fn expect_or_abort(self, msg: &str) -> Self::Ok; -} - -/// This traits expands `Option` with some handy shortcuts. -pub trait OptionExt { - type Some; - - /// Behaves like `Option::expect`: if self is `Some` yield the contained value, - /// otherwise abort macro execution via `abort_call_site!`. - /// If it aborts the `message` will be used for [`compile_error!`][compl_err] invocation. - /// - /// [compl_err]: https://doc.rust-lang.org/std/macro.compile_error.html - fn expect_or_abort(self, msg: &str) -> Self::Some; -} - -/// Abort macro execution and display all the emitted errors, if any. -/// -/// Does nothing if no errors were emitted (warnings do not count). -pub fn abort_if_dirty() { - imp::abort_if_dirty(); -} - -impl> ResultExt for Result { - type Ok = T; - - fn unwrap_or_abort(self) -> T { - match self { - Ok(res) => res, - Err(e) => e.into().abort(), - } - } - - fn expect_or_abort(self, message: &str) -> T { - match self { - Ok(res) => res, - Err(e) => { - let mut e = e.into(); - e.msg = format!("{}: {}", message, e.msg); - e.abort() - } - } - } -} - -impl OptionExt for Option { - type Some = T; - - fn expect_or_abort(self, message: &str) -> T { - match self { - Some(res) => res, - None => abort_call_site!(message), - } - } -} - -/// This is the entry point for a proc-macro. -/// -/// **NOT PUBLIC API, SUBJECT TO CHANGE WITHOUT ANY NOTICE** -#[doc(hidden)] -pub fn entry_point(f: F, proc_macro_hack: bool) -> proc_macro::TokenStream -where - F: FnOnce() -> proc_macro::TokenStream + UnwindSafe, -{ - ENTERED_ENTRY_POINT.with(|flag| flag.set(flag.get() + 1)); - let caught = catch_unwind(f); - let dummy = dummy::cleanup(); - let err_storage = imp::cleanup(); - ENTERED_ENTRY_POINT.with(|flag| flag.set(flag.get() - 1)); - - let gen_error = || { - if proc_macro_hack { - quote! {{ - macro_rules! proc_macro_call { - () => ( unimplemented!() ) - } - - #(#err_storage)* - #dummy - - unimplemented!() - }} - } else { - quote!( #(#err_storage)* #dummy ) - } - }; - - match caught { - Ok(ts) => { - if err_storage.is_empty() { - ts - } else { - gen_error().into() - } - } - - Err(boxed) => match boxed.downcast::() { - Ok(_) => gen_error().into(), - Err(boxed) => resume_unwind(boxed), - }, - } -} - -fn abort_now() -> ! { - check_correctness(); - panic!(AbortNow) -} - -thread_local! 
{ - static ENTERED_ENTRY_POINT: Cell = Cell::new(0); -} - -struct AbortNow; - -fn check_correctness() { - if ENTERED_ENTRY_POINT.with(|flag| flag.get()) == 0 { - panic!( - "proc-macro-error API cannot be used outside of `entry_point` invocation, \ - perhaps you forgot to annotate your #[proc_macro] function with `#[proc_macro_error]" - ); - } -} - -/// **ALL THE STUFF INSIDE IS NOT PUBLIC API!!!** -#[doc(hidden)] -pub mod __export { - // reexports for use in macros - pub extern crate proc_macro; - pub extern crate proc_macro2; - - use proc_macro2::Span; - use quote::ToTokens; - - use crate::SpanRange; - - // inspired by - // https://github.com/dtolnay/case-studies/blob/master/autoref-specialization/README.md#simple-application - - pub trait SpanAsSpanRange { - #[allow(non_snake_case)] - fn FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(&self) -> SpanRange; - } - - pub trait Span2AsSpanRange { - #[allow(non_snake_case)] - fn FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(&self) -> SpanRange; - } - - pub trait ToTokensAsSpanRange { - #[allow(non_snake_case)] - fn FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(&self) -> SpanRange; - } - - pub trait SpanRangeAsSpanRange { - #[allow(non_snake_case)] - fn FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(&self) -> SpanRange; - } - - impl ToTokensAsSpanRange for &T { - fn FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(&self) -> SpanRange { - let mut ts = self.to_token_stream().into_iter(); - let first = ts - .next() - .map(|tt| tt.span()) - .unwrap_or_else(Span::call_site); - let last = ts.last().map(|tt| tt.span()).unwrap_or(first); - SpanRange { first, last } - } - } - - impl Span2AsSpanRange for Span { - fn FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(&self) -> SpanRange { - SpanRange { - first: *self, - last: *self, - } - } - } - - impl SpanAsSpanRange for proc_macro::Span { - fn FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(&self) -> SpanRange { - SpanRange { - first: self.clone().into(), - last: self.clone().into(), - } - } - } - - impl SpanRangeAsSpanRange for SpanRange { - fn FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(&self) -> SpanRange { - *self - } - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/macros.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/macros.rs deleted file mode 100644 index 747b684d5682..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/macros.rs +++ /dev/null @@ -1,288 +0,0 @@ -// FIXME: this can be greatly simplified via $()? -// as soon as MRSV hits 1.32 - -/// Build [`Diagnostic`](struct.Diagnostic.html) instance from provided arguments. -/// -/// # Syntax -/// -/// See [the guide](index.html#guide). -/// -#[macro_export] -macro_rules! 
diagnostic { - // from alias - ($err:expr) => { $crate::Diagnostic::from($err) }; - - // span, message, help - ($span:expr, $level:expr, $fmt:expr, $($args:expr),+ ; $($rest:tt)+) => {{ - #[allow(unused_imports)] - use $crate::__export::{ - ToTokensAsSpanRange, - Span2AsSpanRange, - SpanAsSpanRange, - SpanRangeAsSpanRange - }; - use $crate::DiagnosticExt; - let span_range = (&$span).FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(); - - let diag = $crate::Diagnostic::spanned_range( - span_range, - $level, - format!($fmt, $($args),*) - ); - $crate::__pme__suggestions!(diag $($rest)*); - diag - }}; - - ($span:expr, $level:expr, $msg:expr ; $($rest:tt)+) => {{ - #[allow(unused_imports)] - use $crate::__export::{ - ToTokensAsSpanRange, - Span2AsSpanRange, - SpanAsSpanRange, - SpanRangeAsSpanRange - }; - use $crate::DiagnosticExt; - let span_range = (&$span).FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(); - - let diag = $crate::Diagnostic::spanned_range(span_range, $level, $msg.to_string()); - $crate::__pme__suggestions!(diag $($rest)*); - diag - }}; - - // span, message, no help - ($span:expr, $level:expr, $fmt:expr, $($args:expr),+) => {{ - #[allow(unused_imports)] - use $crate::__export::{ - ToTokensAsSpanRange, - Span2AsSpanRange, - SpanAsSpanRange, - SpanRangeAsSpanRange - }; - use $crate::DiagnosticExt; - let span_range = (&$span).FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(); - - $crate::Diagnostic::spanned_range( - span_range, - $level, - format!($fmt, $($args),*) - ) - }}; - - ($span:expr, $level:expr, $msg:expr) => {{ - #[allow(unused_imports)] - use $crate::__export::{ - ToTokensAsSpanRange, - Span2AsSpanRange, - SpanAsSpanRange, - SpanRangeAsSpanRange - }; - use $crate::DiagnosticExt; - let span_range = (&$span).FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange(); - - $crate::Diagnostic::spanned_range(span_range, $level, $msg.to_string()) - }}; - - - // trailing commas - - ($span:expr, $level:expr, $fmt:expr, $($args:expr),+, ; $($rest:tt)+) => { - $crate::diagnostic!($span, $level, $fmt, $($args),* ; $($rest)*) - }; - ($span:expr, $level:expr, $msg:expr, ; $($rest:tt)+) => { - $crate::diagnostic!($span, $level, $msg ; $($rest)*) - }; - ($span:expr, $level:expr, $fmt:expr, $($args:expr),+,) => { - $crate::diagnostic!($span, $level, $fmt, $($args),*) - }; - ($span:expr, $level:expr, $msg:expr,) => { - $crate::diagnostic!($span, $level, $msg) - }; - // ($err:expr,) => { $crate::diagnostic!($err) }; -} - -/// Abort proc-macro execution right now and display the error. -/// -/// # Syntax -/// -/// See [the guide](index.html#guide). -#[macro_export] -macro_rules! abort { - ($err:expr) => { - $crate::diagnostic!($err).abort() - }; - - ($span:expr, $($tts:tt)*) => { - $crate::diagnostic!($span, $crate::Level::Error, $($tts)*).abort() - }; -} - -/// Shortcut for `abort!(Span::call_site(), msg...)`. This macro -/// is still preferable over plain panic, panics are not for error reporting. -/// -/// # Syntax -/// -/// See [the guide](index.html#guide). -/// -#[macro_export] -macro_rules! abort_call_site { - ($($tts:tt)*) => { - $crate::abort!($crate::__export::proc_macro2::Span::call_site(), $($tts)*) - }; -} - -/// Emit an error while not aborting the proc-macro right away. -/// -/// # Syntax -/// -/// See [the guide](index.html#guide). -/// -#[macro_export] -macro_rules! 
emit_error { - ($err:expr) => { - $crate::diagnostic!($err).emit() - }; - - ($span:expr, $($tts:tt)*) => {{ - let level = $crate::Level::Error; - $crate::diagnostic!($span, level, $($tts)*).emit() - }}; -} - -/// Shortcut for `emit_error!(Span::call_site(), ...)`. This macro -/// is still preferable over plain panic, panics are not for error reporting.. -/// -/// # Syntax -/// -/// See [the guide](index.html#guide). -/// -#[macro_export] -macro_rules! emit_call_site_error { - ($($tts:tt)*) => { - $crate::emit_error!($crate::__export::proc_macro2::Span::call_site(), $($tts)*) - }; -} - -/// Emit a warning. Warnings are not errors and compilation won't fail because of them. -/// -/// **Does nothing on stable** -/// -/// # Syntax -/// -/// See [the guide](index.html#guide). -/// -#[macro_export] -macro_rules! emit_warning { - ($span:expr, $($tts:tt)*) => { - $crate::diagnostic!($span, $crate::Level::Warning, $($tts)*).emit() - }; -} - -/// Shortcut for `emit_warning!(Span::call_site(), ...)`. -/// -/// **Does nothing on stable** -/// -/// # Syntax -/// -/// See [the guide](index.html#guide). -/// -#[macro_export] -macro_rules! emit_call_site_warning { - ($($tts:tt)*) => {{ - $crate::emit_warning!($crate::__export::proc_macro2::Span::call_site(), $($tts)*) - }}; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __pme__suggestions { - ($var:ident) => (); - - ($var:ident $help:ident =? $msg:expr) => { - let $var = if let Some(msg) = $msg { - $var.suggestion(stringify!($help), msg.to_string()) - } else { - $var - }; - }; - ($var:ident $help:ident =? $span:expr => $msg:expr) => { - let $var = if let Some(msg) = $msg { - $var.span_suggestion($span.into(), stringify!($help), msg.to_string()) - } else { - $var - }; - }; - - ($var:ident $help:ident =? $msg:expr ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help =? $msg); - $crate::__pme__suggestions!($var $($rest)*); - }; - ($var:ident $help:ident =? $span:expr => $msg:expr ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help =? 
$span => $msg); - $crate::__pme__suggestions!($var $($rest)*); - }; - - - ($var:ident $help:ident = $msg:expr) => { - let $var = $var.suggestion(stringify!($help), $msg.to_string()); - }; - ($var:ident $help:ident = $fmt:expr, $($args:expr),+) => { - let $var = $var.suggestion( - stringify!($help), - format!($fmt, $($args),*) - ); - }; - ($var:ident $help:ident = $span:expr => $msg:expr) => { - let $var = $var.span_suggestion($span.into(), stringify!($help), $msg.to_string()); - }; - ($var:ident $help:ident = $span:expr => $fmt:expr, $($args:expr),+) => { - let $var = $var.span_suggestion( - $span.into(), - stringify!($help), - format!($fmt, $($args),*) - ); - }; - - ($var:ident $help:ident = $msg:expr ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help = $msg); - $crate::__pme__suggestions!($var $($rest)*); - }; - ($var:ident $help:ident = $fmt:expr, $($args:expr),+ ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help = $fmt, $($args),*); - $crate::__pme__suggestions!($var $($rest)*); - }; - ($var:ident $help:ident = $span:expr => $msg:expr ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help = $span => $msg); - $crate::__pme__suggestions!($var $($rest)*); - }; - ($var:ident $help:ident = $span:expr => $fmt:expr, $($args:expr),+ ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help = $span => $fmt, $($args),*); - $crate::__pme__suggestions!($var $($rest)*); - }; - - // trailing commas - - ($var:ident $help:ident = $msg:expr,) => { - $crate::__pme__suggestions!($var $help = $msg) - }; - ($var:ident $help:ident = $fmt:expr, $($args:expr),+,) => { - $crate::__pme__suggestions!($var $help = $fmt, $($args)*) - }; - ($var:ident $help:ident = $span:expr => $msg:expr,) => { - $crate::__pme__suggestions!($var $help = $span => $msg) - }; - ($var:ident $help:ident = $span:expr => $fmt:expr, $($args:expr),*,) => { - $crate::__pme__suggestions!($var $help = $span => $fmt, $($args)*) - }; - ($var:ident $help:ident = $msg:expr, ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help = $msg; $($rest)*) - }; - ($var:ident $help:ident = $fmt:expr, $($args:expr),+, ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help = $fmt, $($args),*; $($rest)*) - }; - ($var:ident $help:ident = $span:expr => $msg:expr, ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help = $span => $msg; $($rest)*) - }; - ($var:ident $help:ident = $span:expr => $fmt:expr, $($args:expr),+, ; $($rest:tt)*) => { - $crate::__pme__suggestions!($var $help = $span => $fmt, $($args),*; $($rest)*) - }; -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/sealed.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/sealed.rs deleted file mode 100644 index a2d5081e5507..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/src/sealed.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub trait Sealed {} - -impl Sealed for crate::Diagnostic {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/macro-errors.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/macro-errors.rs deleted file mode 100644 index dd60f88a80e7..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/macro-errors.rs +++ /dev/null @@ -1,8 +0,0 @@ -extern crate trybuild; - -#[cfg_attr(skip_ui_tests, ignore)] -#[test] -fn ui() { - let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/*.rs"); -} diff --git 
a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ok.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ok.rs deleted file mode 100644 index cf64c027f8f5..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ok.rs +++ /dev/null @@ -1,10 +0,0 @@ -extern crate test_crate; - -use test_crate::*; - -ok!(it_works); - -#[test] -fn check_it_works() { - it_works(); -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/runtime-errors.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/runtime-errors.rs deleted file mode 100644 index 13108a2d917d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/runtime-errors.rs +++ /dev/null @@ -1,13 +0,0 @@ -use proc_macro_error::*; - -#[test] -#[should_panic = "proc-macro-error API cannot be used outside of"] -fn missing_attr_emit() { - emit_call_site_error!("You won't see me"); -} - -#[test] -#[should_panic = "proc-macro-error API cannot be used outside of"] -fn missing_attr_abort() { - abort_call_site!("You won't see me"); -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/abort.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/abort.rs deleted file mode 100644 index f63118251e03..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/abort.rs +++ /dev/null @@ -1,11 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -abort_from!(one, two); -abort_to_string!(one, two); -abort_format!(one, two); -direct_abort!(one, two); -abort_notes!(one, two); -abort_call_site_test!(one, two); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/abort.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/abort.stderr deleted file mode 100644 index c5399d9d91cc..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/abort.stderr +++ /dev/null @@ -1,48 +0,0 @@ -error: abort!(span, from) test - --> $DIR/abort.rs:4:13 - | -4 | abort_from!(one, two); - | ^^^ - -error: abort!(span, single_expr) test - --> $DIR/abort.rs:5:18 - | -5 | abort_to_string!(one, two); - | ^^^ - -error: abort!(span, expr1, expr2) test - --> $DIR/abort.rs:6:15 - | -6 | abort_format!(one, two); - | ^^^ - -error: Diagnostic::abort() test - --> $DIR/abort.rs:7:15 - | -7 | direct_abort!(one, two); - | ^^^ - -error: This is an error - - = note: simple note - = help: simple help - = help: simple hint - = note: simple yay - = note: format note - = note: Some note - = note: spanned simple note - = note: spanned format note - = note: Some note - - --> $DIR/abort.rs:8:14 - | -8 | abort_notes!(one, two); - | ^^^ - -error: abort_call_site! 
test - --> $DIR/abort.rs:9:1 - | -9 | abort_call_site_test!(one, two); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/append_dummy.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/append_dummy.rs deleted file mode 100644 index 53d6feacc13d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/append_dummy.rs +++ /dev/null @@ -1,13 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -enum NeedDefault { - A, - B -} - -append_dummy!(need_default); - -fn main() { - let _ = NeedDefault::default(); -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/append_dummy.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/append_dummy.stderr deleted file mode 100644 index 8a47ddaac417..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/append_dummy.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: append_dummy test - --> $DIR/append_dummy.rs:9:15 - | -9 | append_dummy!(need_default); - | ^^^^^^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/children_messages.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/children_messages.rs deleted file mode 100644 index fb9e6dc69789..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/children_messages.rs +++ /dev/null @@ -1,6 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -children_messages!(one, two, three, four); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/children_messages.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/children_messages.stderr deleted file mode 100644 index 3b49d8316522..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/children_messages.stderr +++ /dev/null @@ -1,23 +0,0 @@ -error: main macro message - --> $DIR/children_messages.rs:4:20 - | -4 | children_messages!(one, two, three, four); - | ^^^ - -error: child message - --> $DIR/children_messages.rs:4:25 - | -4 | children_messages!(one, two, three, four); - | ^^^ - -error: main syn::Error - --> $DIR/children_messages.rs:4:30 - | -4 | children_messages!(one, two, three, four); - | ^^^^^ - -error: child syn::Error - --> $DIR/children_messages.rs:4:37 - | -4 | children_messages!(one, two, three, four); - | ^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/dummy.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/dummy.rs deleted file mode 100644 index caa4827886c4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/dummy.rs +++ /dev/null @@ -1,13 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -enum NeedDefault { - A, - B -} - -dummy!(need_default); - -fn main() { - let _ = NeedDefault::default(); -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/dummy.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/dummy.stderr deleted file mode 100644 index bae078afa88e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/dummy.stderr +++ 
/dev/null @@ -1,5 +0,0 @@ -error: set_dummy test - --> $DIR/dummy.rs:9:8 - | -9 | dummy!(need_default); - | ^^^^^^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/emit.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/emit.rs deleted file mode 100644 index c5c7db095f34..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/emit.rs +++ /dev/null @@ -1,7 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -emit!(one, two, three, four, five); -emit_notes!(one, two); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/emit.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/emit.stderr deleted file mode 100644 index 9484bd628bbc..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/emit.stderr +++ /dev/null @@ -1,48 +0,0 @@ -error: emit!(span, from) test - --> $DIR/emit.rs:4:7 - | -4 | emit!(one, two, three, four, five); - | ^^^ - -error: emit!(span, expr1, expr2) test - --> $DIR/emit.rs:4:12 - | -4 | emit!(one, two, three, four, five); - | ^^^ - -error: emit!(span, single_expr) test - --> $DIR/emit.rs:4:17 - | -4 | emit!(one, two, three, four, five); - | ^^^^^ - -error: Diagnostic::emit() test - --> $DIR/emit.rs:4:24 - | -4 | emit!(one, two, three, four, five); - | ^^^^ - -error: emit_call_site_error!(expr) test - --> $DIR/emit.rs:4:1 - | -4 | emit!(one, two, three, four, five); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: This is an error - - = note: simple note - = help: simple help - = help: simple hint - = note: simple yay - = note: format note - = note: Some note - = note: spanned simple note - = note: spanned format note - = note: Some note - - --> $DIR/emit.rs:5:13 - | -5 | emit_notes!(one, two); - | ^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/explicit_span_range.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/explicit_span_range.rs deleted file mode 100644 index 82bbebcc55d4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/explicit_span_range.rs +++ /dev/null @@ -1,6 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -explicit_span_range!(one, two, three, four); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/explicit_span_range.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/explicit_span_range.stderr deleted file mode 100644 index 781a71e76a4a..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/explicit_span_range.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: explicit SpanRange - --> $DIR/explicit_span_range.rs:4:22 - | -4 | explicit_span_range!(one, two, three, four); - | ^^^^^^^^^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/misuse.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/misuse.rs deleted file mode 100644 index e6d2d2497146..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/misuse.rs +++ /dev/null @@ -1,11 +0,0 @@ -extern crate proc_macro_error; -use proc_macro_error::abort; - -struct Foo; - -#[allow(unused)] -fn foo() { - abort!(Foo, 
"BOOM"); -} - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/misuse.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/misuse.stderr deleted file mode 100644 index 8eaf6456fd6f..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/misuse.stderr +++ /dev/null @@ -1,13 +0,0 @@ -error[E0599]: no method named `FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange` found for reference `&Foo` in the current scope - --> $DIR/misuse.rs:8:5 - | -4 | struct Foo; - | ----------- doesn't satisfy `Foo: quote::to_tokens::ToTokens` -... -8 | abort!(Foo, "BOOM"); - | ^^^^^^^^^^^^^^^^^^^^ method not found in `&Foo` - | - = note: the method `FIRST_ARG_MUST_EITHER_BE_Span_OR_IMPLEMENT_ToTokens_OR_BE_SpanRange` exists but the following trait bounds were not satisfied: - `Foo: quote::to_tokens::ToTokens` - which is required by `&Foo: proc_macro_error::__export::ToTokensAsSpanRange` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/multiple_tokens.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/multiple_tokens.rs deleted file mode 100644 index 215928f6f489..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/multiple_tokens.rs +++ /dev/null @@ -1,6 +0,0 @@ -extern crate test_crate; - -#[test_crate::multiple_tokens] -type T = (); - -fn main() {} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/multiple_tokens.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/multiple_tokens.stderr deleted file mode 100644 index c6172c6cc634..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/multiple_tokens.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: ... 
- --> $DIR/multiple_tokens.rs:4:1 - | -4 | type T = (); - | ^^^^^^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/not_proc_macro.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/not_proc_macro.rs deleted file mode 100644 index e241c5cd2829..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/not_proc_macro.rs +++ /dev/null @@ -1,4 +0,0 @@ -use proc_macro_error::proc_macro_error; - -#[proc_macro_error] -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/not_proc_macro.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/not_proc_macro.stderr deleted file mode 100644 index f19f01bd8ed8..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/not_proc_macro.stderr +++ /dev/null @@ -1,10 +0,0 @@ -error: #[proc_macro_error] attribute can be used only with procedural macros - - = hint: if you are really sure that #[proc_macro_error] should be applied to this exact function, use #[proc_macro_error(allow_not_macro)] - - --> $DIR/not_proc_macro.rs:3:1 - | -3 | #[proc_macro_error] - | ^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/option_ext.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/option_ext.rs deleted file mode 100644 index dfbfc0383562..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/option_ext.rs +++ /dev/null @@ -1,6 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -option_ext!(one, two); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/option_ext.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/option_ext.stderr deleted file mode 100644 index 91b151ec2fa2..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/option_ext.stderr +++ /dev/null @@ -1,7 +0,0 @@ -error: Option::expect_or_abort() test - --> $DIR/option_ext.rs:4:1 - | -4 | option_ext!(one, two); - | ^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/proc_macro_hack.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/proc_macro_hack.rs deleted file mode 100644 index 2504bdd40104..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/proc_macro_hack.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Adapted from https://github.com/dtolnay/proc-macro-hack/blob/master/example/src/main.rs -// Licensed under either of Apache License, Version 2.0 or MIT license at your option. 
- -use proc_macro_hack_test::add_one; - -fn main() { - let two = 2; - let nine = add_one!(two) + add_one!(2 + 3); - println!("nine = {}", nine); -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/proc_macro_hack.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/proc_macro_hack.stderr deleted file mode 100644 index 0e984f918d09..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/proc_macro_hack.stderr +++ /dev/null @@ -1,26 +0,0 @@ -error: BOOM - --> $DIR/proc_macro_hack.rs:8:25 - | -8 | let nine = add_one!(two) + add_one!(2 + 3); - | ^^^ - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error: BOOM - --> $DIR/proc_macro_hack.rs:8:41 - | -8 | let nine = add_one!(two) + add_one!(2 + 3); - | ^^^^^ - | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -warning: unreachable expression - --> $DIR/proc_macro_hack.rs:8:32 - | -8 | let nine = add_one!(two) + add_one!(2 + 3); - | ------------- ^^^^^^^^^^^^^^^ unreachable expression - | | - | any code following this expression is unreachable - | - = note: `#[warn(unreachable_code)]` on by default - = note: this warning originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/result_ext.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/result_ext.rs deleted file mode 100644 index bdd560dba9c4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/result_ext.rs +++ /dev/null @@ -1,7 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -result_unwrap_or_abort!(one, two); -result_expect_or_abort!(one, two); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/result_ext.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/result_ext.stderr deleted file mode 100644 index f2dc0e4235c6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/result_ext.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: Result::unwrap_or_abort() test - --> $DIR/result_ext.rs:4:25 - | -4 | result_unwrap_or_abort!(one, two); - | ^^^ - -error: BOOM: Result::expect_or_abort() test - --> $DIR/result_ext.rs:5:25 - | -5 | result_expect_or_abort!(one, two); - | ^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/to_tokens_span.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/to_tokens_span.rs deleted file mode 100644 index a7c3fc976c9f..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/to_tokens_span.rs +++ /dev/null @@ -1,6 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -to_tokens_span!(std::option::Option); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/to_tokens_span.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/to_tokens_span.stderr deleted file mode 100644 index b8c4968263a4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/to_tokens_span.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: whole type - --> $DIR/to_tokens_span.rs:4:17 - | -4 | to_tokens_span!(std::option::Option); - | 
^^^^^^^^^^^^^^^^^^^ - -error: explicit .span() - --> $DIR/to_tokens_span.rs:4:17 - | -4 | to_tokens_span!(std::option::Option); - | ^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unknown_setting.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unknown_setting.rs deleted file mode 100644 index d8e58eaf8789..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unknown_setting.rs +++ /dev/null @@ -1,4 +0,0 @@ -use proc_macro_error::proc_macro_error; - -#[proc_macro_error(allow_not_macro, assert_unwind_safe, trololo)] -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unknown_setting.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unknown_setting.stderr deleted file mode 100644 index a55de0b31beb..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unknown_setting.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: unknown setting `trololo`, expected one of `assert_unwind_safe`, `allow_not_macro`, `proc_macro_hack` - --> $DIR/unknown_setting.rs:3:57 - | -3 | #[proc_macro_error(allow_not_macro, assert_unwind_safe, trololo)] - | ^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unrelated_panic.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unrelated_panic.rs deleted file mode 100644 index c74e3e0623e9..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unrelated_panic.rs +++ /dev/null @@ -1,6 +0,0 @@ -extern crate test_crate; -use test_crate::*; - -unrelated_panic!(); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unrelated_panic.stderr b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unrelated_panic.stderr deleted file mode 100644 index d46d689f2f1e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-1.0.4/tests/ui/unrelated_panic.stderr +++ /dev/null @@ -1,7 +0,0 @@ -error: proc macro panicked - --> $DIR/unrelated_panic.rs:4:1 - | -4 | unrelated_panic!(); - | ^^^^^^^^^^^^^^^^^^^ - | - = help: message: unrelated panic test diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/.cargo_vcs_info.json deleted file mode 100644 index 1561cb4ee257..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/.cargo_vcs_info.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "git": { - "sha1": "e231741c47af1beda78d53aee29500cccb8266cd" - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/.gitignore b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/.gitignore deleted file mode 100644 index 5e81b667281d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/target -**/*.rs.bk -Cargo.lock -.fuse_hidden* diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/Cargo.toml deleted file mode 100644 index a2c766de9b51..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -# THIS FILE 
IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "proc-macro-error-attr" -version = "1.0.4" -authors = ["CreepySkeleton "] -build = "build.rs" -description = "Attribute macro for proc-macro-error crate" -license = "MIT OR Apache-2.0" -repository = "https://gitlab.com/CreepySkeleton/proc-macro-error" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[lib] -proc-macro = true -[dependencies.proc-macro2] -version = "1" - -[dependencies.quote] -version = "1" -[build-dependencies.version_check] -version = "0.9" diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/Cargo.toml.orig deleted file mode 100644 index a6b7069abe70..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/Cargo.toml.orig +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "proc-macro-error-attr" -version = "1.0.4" -authors = ["CreepySkeleton "] -edition = "2018" -description = "Attribute macro for proc-macro-error crate" -license = "MIT OR Apache-2.0" -repository = "https://gitlab.com/CreepySkeleton/proc-macro-error" - -build = "build.rs" - -[lib] -proc-macro = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -quote = "1" -proc-macro2 = "1" - -[build-dependencies] -version_check = "0.9" diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/LICENSE-APACHE deleted file mode 100644 index 658240a8401c..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2019-2020 CreepySkeleton - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/LICENSE-MIT deleted file mode 100644 index fc73e591d7f6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/LICENSE-MIT +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019-2020 CreepySkeleton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/build.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/build.rs deleted file mode 100644 index f2ac6a70eeb3..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/build.rs +++ /dev/null @@ -1,5 +0,0 @@ -fn main() { - if version_check::is_max_version("1.36.0").unwrap_or(false) { - println!("cargo:rustc-cfg=always_assert_unwind"); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/lib.rs deleted file mode 100644 index ac0ac21a2628..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/lib.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! This is `#[proc_macro_error]` attribute to be used with -//! [`proc-macro-error`](https://docs.rs/proc-macro-error/). There you go. - -extern crate proc_macro; - -use crate::parse::parse_input; -use crate::parse::Attribute; -use proc_macro::TokenStream; -use proc_macro2::{Literal, Span, TokenStream as TokenStream2, TokenTree}; -use quote::{quote, quote_spanned}; - -use crate::settings::{Setting::*, *}; - -mod parse; -mod settings; - -type Result = std::result::Result; - -struct Error { - span: Span, - message: String, -} - -impl Error { - fn new(span: Span, message: String) -> Self { - Error { span, message } - } - - fn into_compile_error(self) -> TokenStream2 { - let mut message = Literal::string(&self.message); - message.set_span(self.span); - quote_spanned!(self.span=> compile_error!{#message}) - } -} - -#[proc_macro_attribute] -pub fn proc_macro_error(attr: TokenStream, input: TokenStream) -> TokenStream { - match impl_proc_macro_error(attr.into(), input.clone().into()) { - Ok(ts) => ts, - Err(e) => { - let error = e.into_compile_error(); - let input = TokenStream2::from(input); - - quote!(#input #error).into() - } - } -} - -fn impl_proc_macro_error(attr: TokenStream2, input: TokenStream2) -> Result { - let (attrs, signature, body) = parse_input(input)?; - let mut settings = parse_settings(attr)?; - - let is_proc_macro = is_proc_macro(&attrs); - if is_proc_macro { - settings.set(AssertUnwindSafe); - } - - if detect_proc_macro_hack(&attrs) { - settings.set(ProcMacroHack); - } - - if settings.is_set(ProcMacroHack) { - settings.set(AllowNotMacro); - } - - if !(settings.is_set(AllowNotMacro) || is_proc_macro) { - return Err(Error::new( - Span::call_site(), - "#[proc_macro_error] attribute can be used only with procedural macros\n\n \ - = hint: if you are really sure that #[proc_macro_error] should be applied \ - to this exact function, use #[proc_macro_error(allow_not_macro)]\n" - .into(), - )); - } - - let body = gen_body(body, settings); - - let res = quote! 
{ - #(#attrs)* - #(#signature)* - { #body } - }; - Ok(res.into()) -} - -#[cfg(not(always_assert_unwind))] -fn gen_body(block: TokenTree, settings: Settings) -> proc_macro2::TokenStream { - let is_proc_macro_hack = settings.is_set(ProcMacroHack); - let closure = if settings.is_set(AssertUnwindSafe) { - quote!(::std::panic::AssertUnwindSafe(|| #block )) - } else { - quote!(|| #block) - }; - - quote!( ::proc_macro_error::entry_point(#closure, #is_proc_macro_hack) ) -} - -// FIXME: -// proc_macro::TokenStream does not implement UnwindSafe until 1.37.0. -// Considering this is the closure's return type the unwind safety check would fail -// for virtually every closure possible, the check is meaningless. -#[cfg(always_assert_unwind)] -fn gen_body(block: TokenTree, settings: Settings) -> proc_macro2::TokenStream { - let is_proc_macro_hack = settings.is_set(ProcMacroHack); - let closure = quote!(::std::panic::AssertUnwindSafe(|| #block )); - quote!( ::proc_macro_error::entry_point(#closure, #is_proc_macro_hack) ) -} - -fn detect_proc_macro_hack(attrs: &[Attribute]) -> bool { - attrs - .iter() - .any(|attr| attr.path_is_ident("proc_macro_hack")) -} - -fn is_proc_macro(attrs: &[Attribute]) -> bool { - attrs.iter().any(|attr| { - attr.path_is_ident("proc_macro") - || attr.path_is_ident("proc_macro_derive") - || attr.path_is_ident("proc_macro_attribute") - }) -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/parse.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/parse.rs deleted file mode 100644 index 6f4663f80e32..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/parse.rs +++ /dev/null @@ -1,89 +0,0 @@ -use crate::{Error, Result}; -use proc_macro2::{Delimiter, Ident, Span, TokenStream, TokenTree}; -use quote::ToTokens; -use std::iter::Peekable; - -pub(crate) fn parse_input( - input: TokenStream, -) -> Result<(Vec, Vec, TokenTree)> { - let mut input = input.into_iter().peekable(); - let mut attrs = Vec::new(); - - while let Some(attr) = parse_next_attr(&mut input)? 
{ - attrs.push(attr); - } - - let sig = parse_signature(&mut input); - let body = input.next().ok_or_else(|| { - Error::new( - Span::call_site(), - "`#[proc_macro_error]` can be applied only to functions".to_string(), - ) - })?; - - Ok((attrs, sig, body)) -} - -fn parse_next_attr( - input: &mut Peekable>, -) -> Result> { - let shebang = match input.peek() { - Some(TokenTree::Punct(ref punct)) if punct.as_char() == '#' => input.next().unwrap(), - _ => return Ok(None), - }; - - let group = match input.peek() { - Some(TokenTree::Group(ref group)) if group.delimiter() == Delimiter::Bracket => { - let res = group.clone(); - input.next(); - res - } - other => { - let span = other.map_or(Span::call_site(), |tt| tt.span()); - return Err(Error::new(span, "expected `[`".to_string())); - } - }; - - let path = match group.stream().into_iter().next() { - Some(TokenTree::Ident(ident)) => Some(ident), - _ => None, - }; - - Ok(Some(Attribute { - shebang, - group: TokenTree::Group(group), - path, - })) -} - -fn parse_signature(input: &mut Peekable>) -> Vec { - let mut sig = Vec::new(); - loop { - match input.peek() { - Some(TokenTree::Group(ref group)) if group.delimiter() == Delimiter::Brace => { - return sig; - } - None => return sig, - _ => sig.push(input.next().unwrap()), - } - } -} - -pub(crate) struct Attribute { - pub(crate) shebang: TokenTree, - pub(crate) group: TokenTree, - pub(crate) path: Option, -} - -impl Attribute { - pub(crate) fn path_is_ident(&self, ident: &str) -> bool { - self.path.as_ref().map_or(false, |p| *p == ident) - } -} - -impl ToTokens for Attribute { - fn to_tokens(&self, ts: &mut TokenStream) { - self.shebang.to_tokens(ts); - self.group.to_tokens(ts); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/settings.rs b/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/settings.rs deleted file mode 100644 index 0b7ec766f6ce..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/proc-macro-error-attr-1.0.4/src/settings.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::{Error, Result}; -use proc_macro2::{Ident, Span, TokenStream, TokenTree}; - -macro_rules! decl_settings { - ($($val:expr => $variant:ident),+ $(,)*) => { - #[derive(PartialEq)] - pub(crate) enum Setting { - $($variant),* - } - - fn ident_to_setting(ident: Ident) -> Result { - match &*ident.to_string() { - $($val => Ok(Setting::$variant),)* - _ => { - let possible_vals = [$($val),*] - .iter() - .map(|v| format!("`{}`", v)) - .collect::>() - .join(", "); - - Err(Error::new( - ident.span(), - format!("unknown setting `{}`, expected one of {}", ident, possible_vals))) - } - } - } - }; -} - -decl_settings! 
{ - "assert_unwind_safe" => AssertUnwindSafe, - "allow_not_macro" => AllowNotMacro, - "proc_macro_hack" => ProcMacroHack, -} - -pub(crate) fn parse_settings(input: TokenStream) -> Result { - let mut input = input.into_iter(); - let mut res = Settings(Vec::new()); - loop { - match input.next() { - Some(TokenTree::Ident(ident)) => { - res.0.push(ident_to_setting(ident)?); - } - None => return Ok(res), - other => { - let span = other.map_or(Span::call_site(), |tt| tt.span()); - return Err(Error::new(span, "expected identifier".to_string())); - } - } - - match input.next() { - Some(TokenTree::Punct(ref punct)) if punct.as_char() == ',' => {} - None => return Ok(res), - other => { - let span = other.map_or(Span::call_site(), |tt| tt.span()); - return Err(Error::new(span, "expected `,`".to_string())); - } - } - } -} - -pub(crate) struct Settings(Vec); - -impl Settings { - pub(crate) fn is_set(&self, setting: Setting) -> bool { - self.0.iter().any(|s| *s == setting) - } - - pub(crate) fn set(&mut self, setting: Setting) { - self.0.push(setting) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.cargo_vcs_info.json deleted file mode 100644 index 35e8c7983601..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "46ae7e3db24ff9e3c4b34e7ef810e0f7c59cb5c0" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.github/workflows/ci.yml b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.github/workflows/ci.yml deleted file mode 100644 index 67a026d6e512..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.github/workflows/ci.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: ci - -on: [push, pull_request] - -jobs: - build: - - strategy: - matrix: - os: [macOS-latest, ubuntu-latest] - toolchain: [stable, beta, nightly, 1.32.0] - - runs-on: ${{ matrix.os }} - - steps: - - uses: actions/checkout@master - - name: Install Rust - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.toolchain }} - override: true - - name: Run cargo check --all - env: - RUSTFLAGS: -D warnings - run: | - cargo check --all - - name: Run the tests - env: - RUSTFLAGS: -D warnings - run: | - cargo test --all - - name: Run tests with const generics enabled - if: matrix.toolchain == 'nightly' || matrix.toolchain == 'beta' - run: | - cargo test --all --features const-generics - - name: Run cargo doc - run: | - cargo doc --all diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.gitignore b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.gitignore deleted file mode 100644 index c74a93a399db..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/target -**/*.rs.bk -Cargo.lock -*swp diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/Cargo.toml deleted file mode 100644 index 03311a263df6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for 
maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "serde-big-array" -version = "0.3.3" -authors = ["est31 ", "David Tolnay "] -description = "Big array helper for serde." -documentation = "https://docs.rs/serde-big-array" -readme = "README.md" -license = "MIT OR Apache-2.0" -repository = "https://github.com/est31/serde-big-array" -[dependencies.serde] -version = "1.0" -default-features = false -[dev-dependencies.serde_derive] -version = "1.0" - -[dev-dependencies.serde_json] -version = "1.0" - -[features] -const-generics = [] diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/Cargo.toml.orig deleted file mode 100644 index 43f0175d8486..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/Cargo.toml.orig +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "serde-big-array" -version = "0.3.3" -authors = ["est31 ", "David Tolnay "] -license = "MIT OR Apache-2.0" -description = "Big array helper for serde." -documentation = "https://docs.rs/serde-big-array" -repository = "https://github.com/est31/serde-big-array" -readme = "README.md" -edition = "2018" - -[dependencies] -serde = { version = "1.0", default-features = false } - -[dev-dependencies] -serde_derive = "1.0" -serde_json = "1.0" - -[features] -const-generics = [] diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/LICENSE-APACHE deleted file mode 100644 index 19d02b9f73c2..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2018 - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/LICENSE-MIT deleted file mode 100644 index d46d39c46ce6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/LICENSE-MIT +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/README.md b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/README.md deleted file mode 100644 index e07ca766b5fc..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/README.md +++ /dev/null @@ -1,82 +0,0 @@ -## serde-big-array - -[![docs](https://docs.rs/serde-big-array/badge.svg)](https://docs.rs/crate/serde-big-array) -[![crates.io](https://img.shields.io/crates/v/serde-big-array.svg)](https://crates.io/crates/serde-big-array) -[![dependency status](https://deps.rs/repo/github/est31/serde-big-array/status.svg)](https://deps.rs/repo/github/est31/serde-big-array) - -Big array helper for serde. The purpose of this crate is to make (de-)serializing arrays of sizes > 32 easy. This solution is needed until [serde adopts const generics support](https://github.com/serde-rs/serde/issues/1937). - -Bases on [this](https://github.com/serde-rs/serde/issues/631#issuecomment-322677033) snippet. - -```Rust -extern crate serde; -#[macro_use] -extern crate serde_derive; -extern crate serde_json; -#[macro_use] -extern crate serde_big_array; - -big_array! { BigArray; } - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; 64], -} - -#[test] -fn test() { - let s = S { arr: [1; 64] }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr[..] == &s_back.arr[..]); -} -``` - -If you enable the `const-generics` feature, you won't have to invoke the `big_array` macro any more: - -```Rust -#[macro_use] -extern crate serde_derive; -use serde_big_array::BigArray; - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; 64], -} - -#[test] -fn test() { - let s = S { arr: [1; 64] }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr[..] == &s_back.arr[..]); -} -``` - -Important links: - -* Original serde issue [requesting large array support](https://github.com/serde-rs/serde/issues/631) -* [Const generics support issue on serde](https://github.com/serde-rs/serde/issues/1937) -* [serde PR](https://github.com/serde-rs/serde/pull/1860) to add const generics support -* Rust [const generics tracking issue](https://github.com/rust-lang/rust/issues/44580) -* Rust [complex generic constants tracking issue](https://github.com/rust-lang/rust/issues/76560) - -### MSRV - -The minimum supported Rust version (MSRV) is Rust 1.32.0. - -### License -[license]: #license - -This crate is distributed under the terms of both the MIT license -and the Apache License (Version 2.0), at your option. - -See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT) for details. - -#### License of your contributions - -Unless you explicitly state otherwise, any contribution intentionally submitted for -inclusion in the work by you, as defined in the Apache-2.0 license, -shall be dual licensed as above, without any additional terms or conditions. 
diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/src/const_generics.rs b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/src/const_generics.rs deleted file mode 100644 index f8fe12ee7538..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/src/const_generics.rs +++ /dev/null @@ -1,63 +0,0 @@ -use core::fmt; -use core::result; -use core::marker::PhantomData; -use core::mem::MaybeUninit; -use serde::ser::{Serialize, Serializer, SerializeTuple}; -use serde::de::{Deserialize, Deserializer, Visitor, SeqAccess, Error}; - -pub trait BigArray<'de>: Sized { - fn serialize(&self, serializer: S) -> result::Result - where S: Serializer; - fn deserialize(deserializer: D) -> result::Result - where D: Deserializer<'de>; -} -impl<'de, T, const N: usize> BigArray<'de> for [T; N] - where T: Serialize + Deserialize<'de> -{ - fn serialize(&self, serializer: S) -> result::Result - where S: Serializer - { - let mut seq = serializer.serialize_tuple(self.len())?; - for elem in &self[..] { - seq.serialize_element(elem)?; - } - seq.end() - } - - fn deserialize(deserializer: D) -> result::Result - where D: Deserializer<'de> - { - struct ArrayVisitor { - element: PhantomData, - } - - impl<'de, T, const N: usize> Visitor<'de> for ArrayVisitor<[T; N]> - where T: Deserialize<'de> - { - type Value = [T; N]; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "an array of length {}", N) - } - - fn visit_seq(self, mut seq: A) -> result::Result<[T; N], A::Error> - where A: SeqAccess<'de> - { - unsafe { - let mut arr: MaybeUninit<[T; N]> = MaybeUninit::uninit(); - for i in 0 .. N { - let p = (arr.as_mut_ptr() as * mut T).wrapping_add(i); - core::ptr::write(p, seq.next_element()? - .ok_or_else(|| Error::invalid_length(i, &self))?); - } - Ok(arr.assume_init()) - } - } - } - - let visitor = ArrayVisitor { element: PhantomData }; - // The allow is needed to support (32 + 33) like expressions - #[allow(unused_parens)] - deserializer.deserialize_tuple(N, visitor) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/src/lib.rs deleted file mode 100644 index a2de3798df5f..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/src/lib.rs +++ /dev/null @@ -1,228 +0,0 @@ -#![cfg_attr(not(feature = "const-generics"), forbid(unsafe_code))] - -/*! -Big array helper for serde. -The purpose of this crate is to make (de-)serializing arrays of sizes > 32 easy. -This solution is needed until [serde adopts const generics support](https://github.com/serde-rs/serde/issues/1937). - -## Example -``` -extern crate serde; -#[macro_use] -extern crate serde_derive; -extern crate serde_json; -#[macro_use] -extern crate serde_big_array; - -big_array! { BigArray; } - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; 64], -} - -#[test] -fn test() { - let s = S { arr: [1; 64] }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr[..] 
== &s_back.arr[..]); -} - -# fn main() {} -``` - -If you enable the `const-generics` feature, you won't have to invoke the `big_array` macro any more: - -```Rust -#[macro_use] -extern crate serde_derive; -use serde_big_array::BigArray; - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; 64], -} - -#[test] -fn test() { - let s = S { arr: [1; 64] }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::<S>(&j).unwrap(); - assert!(&s.arr[..] == &s_back.arr[..]); -} - -# fn main() {} -``` -*/ -#![no_std] - -#[doc(hidden)] -pub mod reex { - pub use core::fmt; - pub use core::result; - pub use core::marker::PhantomData; - pub use serde::ser; - pub use serde::ser::{Serialize, Serializer}; - pub use serde::de::{Deserialize, Deserializer, Visitor, SeqAccess, Error}; -} - -#[cfg(feature = "const-generics")] -mod const_generics; -#[cfg(feature = "const-generics")] -pub use const_generics::BigArray; - -/** -Big array macro - -This is the main macro of this crate. -Invoking it creates a trait that can be used together with a `#[serde(with = "TraitName")]` like attribute -on an array that's a member of a struct you want to (de-) serialize. -``` -# use serde_derive::{Serialize, Deserialize}; -# use serde_big_array::big_array; -# fn main() {} -# -big_array! { BigArray; } - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; 128], -} -``` -The name of the added trait is your choice. - -The macro doesn't automatically implement the trait for all possible array lengths. -Instead, the trait is implemented for a pre-specified set of numbers. -The default way to invoke the macro is by specifying the name only, like: -``` -# use serde_derive::{Serialize, Deserialize}; -# use serde_big_array::big_array; -# fn main() {} -# -big_array! { - BigArray; -} -``` -Then, the trait will be implemented for a pre-defined set of interesting array lengths. -Currently, the numbers are: -```ignore -40, 48, 50, 56, 64, 72, 96, 100, 128, 160, 192, 200, 224, 256, 384, 512, -768, 1024, 2048, 4096, 8192, 16384, 32768, 65536, -``` -These are the same numbers that the `arrayvec` crate uses as well, -and should cover most places this macro is used. - -If this default setting is not suiting your use case, the macro has you covered as well. -You can specify a custom set of numbers by using the second way to invoke the macro: - -``` -# use serde_derive::{Serialize, Deserialize}; -# use serde_big_array::big_array; -# fn main() {} -# -big_array! { - BigArray; - +42, 300, 1234, 99999, -} - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr_a: [u8; 300], - #[serde(with = "BigArray")] - arr_b: [u8; 42], -} -``` - -If the `+` is specified like in the example above, the trait is also implemented for the -pre-defined set of array lengths. If omitted, it's implemented for the specified numbers only. -*/ -#[macro_export] -macro_rules! big_array { - ($name:ident; $($len:expr),+ $(,)?) 
=> { - pub trait $name<'de>: Sized { - fn serialize(&self, serializer: S) -> $crate::reex::result::Result - where S: $crate::reex::Serializer; - fn deserialize(deserializer: D) -> $crate::reex::result::Result - where D: $crate::reex::Deserializer<'de>; - } - $( - impl<'de, T> $name<'de> for [T; $len] - where T: Default + Copy + $crate::reex::Serialize + $crate::reex::Deserialize<'de> - { - fn serialize(&self, serializer: S) -> $crate::reex::result::Result - where S: $crate::reex::Serializer - { - use $crate::reex::ser::SerializeTuple; - let mut seq = serializer.serialize_tuple(self.len())?; - for elem in &self[..] { - seq.serialize_element(elem)?; - } - seq.end() - } - - fn deserialize(deserializer: D) -> $crate::reex::result::Result<[T; $len], D::Error> - where D: $crate::reex::Deserializer<'de> - { - use $crate::reex::PhantomData; - struct ArrayVisitor { - element: PhantomData, - } - - impl<'de, T> $crate::reex::Visitor<'de> for ArrayVisitor - where T: Default + Copy + $crate::reex::Deserialize<'de> - { - type Value = [T; $len]; - - fn expecting(&self, formatter: &mut $crate::reex::fmt::Formatter) -> $crate::reex::fmt::Result { - macro_rules! write_len { - ($l:literal) => { - write!(formatter, concat!("an array of length ", $l)) - }; - ($l:tt) => { - write!(formatter, "an array of length {}", $l) - }; - } - - write_len!($len) - } - - fn visit_seq(self, mut seq: A) -> $crate::reex::result::Result<[T; $len], A::Error> - where A: $crate::reex::SeqAccess<'de> - { - let mut arr = [T::default(); $len]; - for i in 0..$len { - arr[i] = seq.next_element()? - .ok_or_else(|| $crate::reex::Error::invalid_length(i, &self))?; - } - Ok(arr) - } - } - - let visitor = ArrayVisitor { element: PhantomData }; - // The allow is needed to support (32 + 33) like expressions - #[allow(unused_parens)] - deserializer.deserialize_tuple($len, visitor) - } - } - )+ - }; - ($name:ident; + $($len:expr),* $(,)?) => { - big_array! { - $name; - 40, 48, 50, 56, 64, 72, 96, 100, 128, 160, 192, 200, 224, 256, 384, 512, - 768, 1024, 2048, 4096, 8192, 16384, 32768, 65536, - $($len,)* - } - }; - ($name:ident;) => { - big_array! { - $name; + - } - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/basic.rs b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/basic.rs deleted file mode 100644 index 26679a81bb99..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/basic.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![no_std] - -use serde_derive::{Serialize, Deserialize}; -use serde_big_array::big_array; - -big_array! { BigArray; } - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; 64], -} - -#[test] -fn test() { - let s = S { arr: [1; 64] }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr[..] == &s_back.arr[..]); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const.rs b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const.rs deleted file mode 100644 index 30218428a539..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const.rs +++ /dev/null @@ -1,22 +0,0 @@ -#![no_std] - -use serde_derive::{Serialize, Deserialize}; -use serde_big_array::big_array; - -const NUMBER: usize = 127; - -big_array! 
{ BigArray; NUMBER, } - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; NUMBER], -} - -#[test] -fn test() { - let s = S { arr: [1; NUMBER] }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr[..] == &s_back.arr[..]); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_expr.rs b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_expr.rs deleted file mode 100644 index 99aafe82851f..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_expr.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![no_std] - -use serde_derive::{Serialize, Deserialize}; -use serde_big_array::big_array; - -const NUMBER: usize = 137; - -big_array! { - BigArray; - NUMBER * NUMBER + 17, NUMBER, 42 -} - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr_1: [u8; NUMBER * NUMBER + 17], - #[serde(with = "BigArray")] - arr_2: [u8; NUMBER], - #[serde(with = "BigArray")] - arr_3: [u8; 42], -} - -#[test] -fn test() { - let s = S { - arr_1: [1; NUMBER * NUMBER + 17], - arr_2: [2; NUMBER], - arr_3: [3; 42], - }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr_1[..] == &s_back.arr_1[..]); - assert!(&s.arr_2[..] == &s_back.arr_2[..]); - assert!(&s.arr_3[..] == &s_back.arr_3[..]); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_generics.rs b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_generics.rs deleted file mode 100644 index 24a564864422..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_generics.rs +++ /dev/null @@ -1,20 +0,0 @@ -#![cfg(feature = "const-generics")] - -use serde_derive::{Serialize, Deserialize}; -use serde_big_array::BigArray; - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; 64], - #[serde(with = "BigArray")] - arr2: [u8; 65], -} - -#[test] -fn test() { - let s = S { arr: [1; 64], arr2: [1; 65] }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr[..] == &s_back.arr[..]); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_path.rs b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_path.rs deleted file mode 100644 index 8fce9076650a..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/const_path.rs +++ /dev/null @@ -1,24 +0,0 @@ -#![no_std] - -use serde_derive::{Serialize, Deserialize}; -use serde_big_array::big_array; - -mod module { - pub const NUMBER: usize = 127; -} - -big_array! { BigArray; module::NUMBER, } - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; module::NUMBER], -} - -#[test] -fn test() { - let s = S { arr: [1; module::NUMBER] }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr[..] 
== &s_back.arr[..]); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/plus.rs b/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/plus.rs deleted file mode 100644 index fa804bc858af..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde-big-array-0.3.3/tests/plus.rs +++ /dev/null @@ -1,31 +0,0 @@ -#![no_std] - -use serde_big_array::big_array; -use serde_derive::{Deserialize, Serialize}; - -big_array! { BigArray; +127, } -big_array! { BigArrayNoTrailing; +127, 129 } - -#[derive(Serialize, Deserialize)] -struct S { - #[serde(with = "BigArray")] - arr: [u8; 64], - #[serde(with = "BigArray")] - arr_2: [u8; 127], - #[serde(with = "BigArrayNoTrailing")] - arr_3: [u8; 129], -} - -#[test] -fn test() { - let s = S { - arr: [1; 64], - arr_2: [1; 127], - arr_3: [1; 129], - }; - let j = serde_json::to_string(&s).unwrap(); - let s_back = serde_json::from_str::(&j).unwrap(); - assert!(&s.arr[..] == &s_back.arr[..]); - assert!(&s.arr_2[..] == &s_back.arr_2[..]); - assert!(&s.arr_3[..] == &s_back.arr_3[..]); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.cargo_vcs_info.json deleted file mode 100644 index ba6d44a32562..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "1708a2bc7e8eb7ff60d4409eecef9f62d5451be5" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.github/workflows/build.yml b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.github/workflows/build.yml deleted file mode 100644 index 228fb4104577..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.github/workflows/build.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: build - -on: [push, pull_request] - -jobs: - build: - name: Build - strategy: - fail-fast: false - matrix: - platform: [ubuntu-latest, macos-latest, windows-latest] - toolchain: [stable] - runs-on: ${{ matrix.platform }} - - steps: - - name: Checkout Sources - uses: actions/checkout@v3 - - - name: Cache Dependencies & Build Outputs - uses: actions/cache@v3 - with: - path: ~/.cargo - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - - - name: Install Rust Toolchain - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{ matrix.toolchain }} - components: rustfmt, clippy - - - name: Check Code Format - run: cargo fmt --all -- --check - shell: bash - - - name: Code Lint - run: cargo clippy --all-targets --workspace -- -D warnings - shell: bash - - - name: Test - run: cargo test --workspace - shell: bash diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/Cargo.toml deleted file mode 100644 index d8017dd90bb4..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/Cargo.toml +++ /dev/null @@ -1,72 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. 
-# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "serde_ipld_dagcbor" -version = "0.4.2" -authors = [ - "Pyfisch ", - "Steven Fackler ", - "Volker Mische ", -] -description = "IPLD DAG-CBOR support for Serde." -readme = "README.md" -keywords = [ - "serde", - "cbor", - "serialization", - "no_std", -] -categories = ["encoding"] -license = "MIT/Apache-2.0" -repository = "https://github.com/ipld/serde_ipld_dagcbor" - -[dependencies.cbor4ii] -version = "0.2.14" -features = ["use_alloc"] -default-features = false - -[dependencies.cid] -version = "0.10.1" -features = ["serde-codec"] -default-features = false - -[dependencies.scopeguard] -version = "1.1.0" - -[dependencies.serde] -version = "1.0.164" -features = ["alloc"] -default-features = false - -[dev-dependencies.libipld-core] -version = "0.16.0" -features = ["serde-codec"] -default-features = false - -[dev-dependencies.serde_bytes] -version = "0.11.9" -features = ["alloc"] -default-features = false - -[dev-dependencies.serde_derive] -version = "1.0.164" -default-features = false - -[features] -default = ["std"] -std = [ - "cbor4ii/use_std", - "cid/std", - "serde/std", - "serde_bytes/std", -] diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/Cargo.toml.orig deleted file mode 100644 index 3682cb894598..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/Cargo.toml.orig +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "serde_ipld_dagcbor" -version = "0.4.2" -authors = [ - "Pyfisch ", - "Steven Fackler ", - "Volker Mische " -] -repository = "https://github.com/ipld/serde_ipld_dagcbor" -readme = "README.md" -license = "MIT/Apache-2.0" -description = "IPLD DAG-CBOR support for Serde." -keywords = ["serde", "cbor", "serialization", "no_std"] -categories = ["encoding"] -edition = "2018" - -[dependencies] -cbor4ii = { version = "0.2.14", default-features = false, features = ["use_alloc"] } -cid = { version = "0.10.1", default-features = false, features = ["serde-codec"] } -scopeguard = "1.1.0" -serde = { version = "1.0.164", default-features = false, features = ["alloc"] } - -[dev-dependencies] -serde_derive = { version = "1.0.164", default-features = false } -libipld-core = { version = "0.16.0", default-features = false, features = ["serde-codec"] } -serde_bytes = { version = "0.11.9", default-features = false, features = ["alloc"]} - -[features] -default = ["std"] -std = ["cbor4ii/use_std", "cid/std", "serde/std", "serde_bytes/std"] diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/README.md b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/README.md deleted file mode 100644 index 39754e795036..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/README.md +++ /dev/null @@ -1,77 +0,0 @@ -Serde IPLD DAG-CBOR -=================== - -[![Crates.io](https://img.shields.io/crates/v/serde_ipld_dag_cbor.svg)](https://crates.io/crates/serde_ipld_dagcbor) -[![Documentation](https://docs.rs/serde_ipld_dag_cbor/badge.svg)](https://docs.rs/serde_ipld_dag_cbor) - -This is a [Serde] implementation for [DAG-CBOR]. It can be used in conjunction with [libipld]. 
- -The underlying library for CBOR encoding/decoding is [cbor4ii] and the Serde implementation is also heavily based on their code. - -This crate started as a fork of [serde_cbor], thanks everyone involved there. - -[Serde]: https://github.com/serde-rs/serde -[DAG-CBOR]: https://ipld.io/specs/codecs/dag-cbor/spec/ -[libipld]: https://github.com/ipld/libipld -[cbor4ii]: https://github.com/quininer/cbor4ii -[serde_cbor]: https://github.com/pyfisch/cbor - - -Usage ------ - -Storing and loading Rust types is easy and requires only -minimal modifications to the program code. - -```rust -use serde_derive::{Deserialize, Serialize}; -use std::error::Error; -use std::fs::File; -use std::io::BufReader; - -// Types annotated with `Serialize` can be stored as DAG-CBOR. -// To be able to load them again add `Deserialize`. -#[derive(Debug, Serialize, Deserialize)] -struct Mascot { - name: String, - species: String, - year_of_birth: u32, -} - -fn main() -> Result<(), Box> { - let ferris = Mascot { - name: "Ferris".to_owned(), - species: "crab".to_owned(), - year_of_birth: 2015, - }; - - let ferris_file = File::create("examples/ferris.cbor")?; - // Write Ferris to the given file. - // Instead of a file you can use any type that implements `io::Write` - // like a HTTP body, database connection etc. - serde_ipld_dagcbor::to_writer(ferris_file, &ferris)?; - - let tux_file = File::open("examples/tux.cbor")?; - let tux_reader = BufReader::new(tux_file); - // Load Tux from a file. - // Serde IPLD DAG-CBOR performs roundtrip serialization meaning that - // the data will not change in any way. - let tux: Mascot = serde_ipld_dagcbor::from_reader(tux_reader)?; - - println!("{:?}", tux); - // prints: Mascot { name: "Tux", species: "penguin", year_of_birth: 1996 } - - Ok(()) -} -``` - - -License -------- - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/de.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/de.rs deleted file mode 100644 index 330a8c2f75c2..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/de.rs +++ /dev/null @@ -1,681 +0,0 @@ -//! Deserialization. -#[cfg(not(feature = "std"))] -use alloc::borrow::Cow; -use core::convert::{Infallible, TryFrom}; -#[cfg(feature = "std")] -use std::borrow::Cow; - -use cbor4ii::core::dec::{self, Decode}; -use cbor4ii::core::{major, types, utils::SliceReader}; -use cid::serde::CID_SERDE_PRIVATE_IDENTIFIER; -use serde::de::{self, Visitor}; - -use crate::cbor4ii_nonpub::{marker, peek_one, pull_one}; -use crate::error::DecodeError; -use crate::CBOR_TAGS_CID; -#[cfg(feature = "std")] -use cbor4ii::core::utils::IoReader; - -/// Decodes a value from CBOR data in a slice. -/// -/// # Examples -/// -/// Deserialize a `String` -/// -/// ``` -/// # use serde_ipld_dagcbor::de; -/// let v: Vec = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]; -/// let value: String = de::from_slice(&v[..]).unwrap(); -/// assert_eq!(value, "foobar"); -/// ``` -/// -/// Deserialize a borrowed string with zero copies. 
-/// -/// ``` -/// # use serde_ipld_dagcbor::de; -/// let v: Vec = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]; -/// let value: &str = de::from_slice(&v[..]).unwrap(); -/// assert_eq!(value, "foobar"); -/// ``` -pub fn from_slice<'a, T>(buf: &'a [u8]) -> Result> -where - T: de::Deserialize<'a>, -{ - let reader = SliceReader::new(buf); - let mut deserializer = Deserializer::from_reader(reader); - let value = serde::Deserialize::deserialize(&mut deserializer)?; - deserializer.end()?; - Ok(value) -} - -/// Decodes a value from CBOR data in a reader. -/// -/// # Examples -/// -/// Deserialize a `String` -/// -/// ``` -/// # use serde_ipld_dagcbor::de; -/// let v: Vec = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]; -/// let value: String = de::from_reader(&v[..]).unwrap(); -/// assert_eq!(value, "foobar"); -/// ``` -/// -/// Note that `from_reader` cannot borrow data: -/// -/// ```compile_fail -/// # use serde_ipld_dagcbor::de; -/// let v: Vec = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]; -/// let value: &str = de::from_reader(&v[..]).unwrap(); -/// assert_eq!(value, "foobar"); -/// ``` -#[cfg(feature = "std")] -pub fn from_reader(reader: R) -> Result> -where - T: de::DeserializeOwned, - R: std::io::BufRead, -{ - let reader = IoReader::new(reader); - let mut deserializer = Deserializer::from_reader(reader); - let value = serde::Deserialize::deserialize(&mut deserializer)?; - deserializer.end()?; - Ok(value) -} - -/// A Serde `Deserialize`r of DAG-CBOR data. -#[derive(Debug)] -struct Deserializer { - reader: R, -} - -impl Deserializer { - /// Constructs a `Deserializer` which reads from a `Read`er. - pub fn from_reader(reader: R) -> Deserializer { - Deserializer { reader } - } -} - -impl<'de, R: dec::Read<'de>> Deserializer { - #[allow(clippy::type_complexity)] - #[inline] - fn try_step<'a>( - &'a mut self, - ) -> Result ()>, DecodeError> - { - if self.reader.step_in() { - Ok(scopeguard::guard(self, |de| de.reader.step_out())) - } else { - Err(DecodeError::DepthLimit) - } - } - - #[inline] - fn deserialize_cid(&mut self, visitor: V) -> Result> - where - V: Visitor<'de>, - { - let tag = dec::TagStart::decode(&mut self.reader)?; - - match tag.0 { - CBOR_TAGS_CID => visitor.visit_newtype_struct(&mut CidDeserializer(self)), - _ => Err(DecodeError::TypeMismatch { - name: "CBOR tag", - byte: tag.0 as u8, - }), - } - } - - /// This method should be called after a value has been deserialized to ensure there is no - /// trailing data in the input source. - pub fn end(&mut self) -> Result<(), DecodeError> { - match peek_one(&mut self.reader) { - Ok(_) => Err(DecodeError::TrailingData), - Err(DecodeError::Eof) => Ok(()), - Err(error) => Err(error), - } - } -} - -macro_rules! deserialize_type { - ( @ $t:ty , $name:ident , $visit:ident ) => { - #[inline] - fn $name(self, visitor: V) -> Result - where V: Visitor<'de> - { - let value = <$t>::decode(&mut self.reader)?; - visitor.$visit(value) - } - }; - ( $( $t:ty , $name:ident , $visit:ident );* $( ; )? 
) => { - $( - deserialize_type!(@ $t, $name, $visit); - )* - }; -} - -impl<'de, 'a, R: dec::Read<'de>> serde::Deserializer<'de> for &'a mut Deserializer { - type Error = DecodeError; - - fn deserialize_any(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - let mut de = self.try_step()?; - let de = &mut *de; - - let byte = peek_one(&mut de.reader)?; - if is_indefinite(byte) { - return Err(DecodeError::IndefiniteSize); - } - match dec::if_major(byte) { - major::UNSIGNED => de.deserialize_u64(visitor), - major::NEGATIVE => { - // CBOR supports negative integers up to -2^64 which is less than i64::MIN. Only - // treat it as i128, if it is outside the i64 range. - let value = i128::decode(&mut de.reader)?; - match i64::try_from(value) { - Ok(value_i64) => visitor.visit_i64(value_i64), - Err(_) => visitor.visit_i128(value), - } - } - major::BYTES => de.deserialize_byte_buf(visitor), - major::STRING => de.deserialize_string(visitor), - major::ARRAY => de.deserialize_seq(visitor), - major::MAP => de.deserialize_map(visitor), - // The only supported tag is tag 42 (CID). - major::TAG => de.deserialize_cid(visitor), - major::SIMPLE => match byte { - marker::FALSE => { - de.reader.advance(1); - visitor.visit_bool(false) - } - marker::TRUE => { - de.reader.advance(1); - visitor.visit_bool(true) - } - marker::NULL => { - de.reader.advance(1); - visitor.visit_none() - } - marker::F32 => de.deserialize_f32(visitor), - marker::F64 => de.deserialize_f64(visitor), - _ => Err(DecodeError::Unsupported { byte }), - }, - _ => Err(DecodeError::Unsupported { byte }), - } - } - - deserialize_type!( - bool, deserialize_bool, visit_bool; - - i8, deserialize_i8, visit_i8; - i16, deserialize_i16, visit_i16; - i32, deserialize_i32, visit_i32; - i64, deserialize_i64, visit_i64; - i128, deserialize_i128, visit_i128; - - u8, deserialize_u8, visit_u8; - u16, deserialize_u16, visit_u16; - u32, deserialize_u32, visit_u32; - u64, deserialize_u64, visit_u64; - u128, deserialize_u128, visit_u128; - - f32, deserialize_f32, visit_f32; - f64, deserialize_f64, visit_f64; - ); - - #[inline] - fn deserialize_char(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - // Treat it as a String. - // This is a bit wasteful when encountering strings of more than one character, - // but we are optimistic this is a cold path. - self.deserialize_str(visitor) - } - - #[inline] - fn deserialize_bytes(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match >>::decode(&mut self.reader)?.0 { - Cow::Borrowed(buf) => visitor.visit_borrowed_bytes(buf), - Cow::Owned(buf) => visitor.visit_byte_buf(buf), - } - } - - #[inline] - fn deserialize_byte_buf(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_bytes(visitor) - } - - #[inline] - fn deserialize_str(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - match >::decode(&mut self.reader)? 
{ - Cow::Borrowed(buf) => visitor.visit_borrowed_str(buf), - Cow::Owned(buf) => visitor.visit_string(buf), - } - } - - #[inline] - fn deserialize_string(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_str(visitor) - } - - #[inline] - fn deserialize_option(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - let byte = peek_one(&mut self.reader)?; - if byte != marker::NULL { - let mut de = self.try_step()?; - visitor.visit_some(&mut **de) - } else { - self.reader.advance(1); - visitor.visit_none() - } - } - - #[inline] - fn deserialize_unit(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - let byte = pull_one(&mut self.reader)?; - if byte == marker::NULL { - visitor.visit_unit() - } else { - Err(DecodeError::TypeMismatch { name: "unit", byte }) - } - } - - #[inline] - fn deserialize_unit_struct( - self, - _name: &'static str, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - self.deserialize_unit(visitor) - } - - #[inline] - fn deserialize_newtype_struct( - self, - name: &'static str, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - if name == CID_SERDE_PRIVATE_IDENTIFIER { - self.deserialize_cid(visitor) - } else { - visitor.visit_newtype_struct(self) - } - } - - #[inline] - fn deserialize_seq(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - let mut de = self.try_step()?; - let seq = Accessor::array(&mut de)?; - visitor.visit_seq(seq) - } - - #[inline] - fn deserialize_tuple(self, len: usize, visitor: V) -> Result - where - V: Visitor<'de>, - { - let mut de = self.try_step()?; - let seq = Accessor::tuple(&mut de, len)?; - visitor.visit_seq(seq) - } - - #[inline] - fn deserialize_tuple_struct( - self, - _name: &'static str, - len: usize, - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - self.deserialize_tuple(len, visitor) - } - - #[inline] - fn deserialize_map(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - let mut de = self.try_step()?; - let map = Accessor::map(&mut de)?; - visitor.visit_map(map) - } - - #[inline] - fn deserialize_struct( - self, - _name: &'static str, - _fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - self.deserialize_map(visitor) - } - - #[inline] - fn deserialize_enum( - self, - _name: &'static str, - _variants: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - let mut de = self.try_step()?; - let accessor = EnumAccessor::enum_(&mut de)?; - visitor.visit_enum(accessor) - } - - #[inline] - fn deserialize_identifier(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - self.deserialize_str(visitor) - } - - #[inline] - fn deserialize_ignored_any(self, visitor: V) -> Result - where - V: Visitor<'de>, - { - let _ignore = dec::IgnoredAny::decode(&mut self.reader)?; - visitor.visit_unit() - } - - #[inline] - fn is_human_readable(&self) -> bool { - false - } -} - -struct Accessor<'a, R> { - de: &'a mut Deserializer, - len: usize, -} - -impl<'de, 'a, R: dec::Read<'de>> Accessor<'a, R> { - #[inline] - pub fn array(de: &'a mut Deserializer) -> Result, DecodeError> { - let array_start = dec::ArrayStart::decode(&mut de.reader)?; - array_start.0.map_or_else( - || Err(DecodeError::IndefiniteSize), - move |len| Ok(Accessor { de, len }), - ) - } - - #[inline] - pub fn tuple( - de: &'a mut Deserializer, - len: usize, - ) -> Result, DecodeError> { - let array_start = dec::ArrayStart::decode(&mut de.reader)?; - - if array_start.0 == Some(len) { - Ok(Accessor { de, len }) - } else { - 
Err(DecodeError::RequireLength { - name: "tuple", - expect: len, - value: array_start.0.unwrap_or(0), - }) - } - } - - #[inline] - pub fn map(de: &'a mut Deserializer) -> Result, DecodeError> { - let map_start = dec::MapStart::decode(&mut de.reader)?; - map_start.0.map_or_else( - || Err(DecodeError::IndefiniteSize), - move |len| Ok(Accessor { de, len }), - ) - } -} - -impl<'de, 'a, R> de::SeqAccess<'de> for Accessor<'a, R> -where - R: dec::Read<'de>, -{ - type Error = DecodeError; - - #[inline] - fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> - where - T: de::DeserializeSeed<'de>, - { - if self.len > 0 { - self.len -= 1; - Ok(Some(seed.deserialize(&mut *self.de)?)) - } else { - Ok(None) - } - } - - #[inline] - fn size_hint(&self) -> Option { - Some(self.len) - } -} - -impl<'de, 'a, R: dec::Read<'de>> de::MapAccess<'de> for Accessor<'a, R> { - type Error = DecodeError; - - #[inline] - fn next_key_seed(&mut self, seed: K) -> Result, Self::Error> - where - K: de::DeserializeSeed<'de>, - { - if self.len > 0 { - self.len -= 1; - Ok(Some(seed.deserialize(&mut *self.de)?)) - } else { - Ok(None) - } - } - - #[inline] - fn next_value_seed(&mut self, seed: V) -> Result - where - V: de::DeserializeSeed<'de>, - { - seed.deserialize(&mut *self.de) - } - - #[inline] - fn size_hint(&self) -> Option { - Some(self.len) - } -} - -struct EnumAccessor<'a, R> { - de: &'a mut Deserializer, -} - -impl<'de, 'a, R: dec::Read<'de>> EnumAccessor<'a, R> { - #[inline] - pub fn enum_( - de: &'a mut Deserializer, - ) -> Result, DecodeError> { - let byte = peek_one(&mut de.reader)?; - match dec::if_major(byte) { - // string - major::STRING => Ok(EnumAccessor { de }), - // 1 length map - major::MAP if byte == (major::MAP << 5) | 1 => { - de.reader.advance(1); - Ok(EnumAccessor { de }) - } - _ => Err(DecodeError::TypeMismatch { name: "enum", byte }), - } - } -} - -impl<'de, 'a, R> de::EnumAccess<'de> for EnumAccessor<'a, R> -where - R: dec::Read<'de>, -{ - type Error = DecodeError; - type Variant = EnumAccessor<'a, R>; - - #[inline] - fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error> - where - V: de::DeserializeSeed<'de>, - { - let variant = seed.deserialize(&mut *self.de)?; - Ok((variant, self)) - } -} - -impl<'de, 'a, R> de::VariantAccess<'de> for EnumAccessor<'a, R> -where - R: dec::Read<'de>, -{ - type Error = DecodeError; - - #[inline] - fn unit_variant(self) -> Result<(), Self::Error> { - Ok(()) - } - - #[inline] - fn newtype_variant_seed(self, seed: T) -> Result - where - T: de::DeserializeSeed<'de>, - { - seed.deserialize(&mut *self.de) - } - - #[inline] - fn tuple_variant(self, len: usize, visitor: V) -> Result - where - V: Visitor<'de>, - { - use serde::Deserializer; - - self.de.deserialize_tuple(len, visitor) - } - - #[inline] - fn struct_variant( - self, - _fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: Visitor<'de>, - { - use serde::Deserializer; - - self.de.deserialize_map(visitor) - } -} - -/// Deserialize a DAG-CBOR encoded CID. -/// -/// This is without the CBOR tag information. It is only the CBOR byte string identifier (major -/// type 2), the number of bytes, and a null byte prefixed CID. -/// -/// The reason for not including the CBOR tag information is the [`Value`] implementation. That one -/// starts to parse the bytes, before we could interfere. If the data only includes a CID, we are -/// parsing over the tag to determine whether it is a CID or not and go from there. 
-struct CidDeserializer<'a, R>(&'a mut Deserializer); - -impl<'de, 'a, R: dec::Read<'de>> de::Deserializer<'de> for &'a mut CidDeserializer<'a, R> { - type Error = DecodeError; - - fn deserialize_any>(self, _visitor: V) -> Result { - Err(de::Error::custom( - "Only bytes can be deserialized into a CID", - )) - } - - #[inline] - fn deserialize_bytes>(self, visitor: V) -> Result { - let byte = peek_one(&mut self.0.reader)?; - match dec::if_major(byte) { - major::BYTES => { - // CBOR encoded CIDs have a zero byte prefix we have to remove. - match >>::decode(&mut self.0.reader)?.0 { - Cow::Borrowed(buf) => { - if buf.len() <= 1 || buf[0] != 0 { - Err(DecodeError::Msg("Invalid CID".into())) - } else { - visitor.visit_borrowed_bytes(&buf[1..]) - } - } - Cow::Owned(mut buf) => { - if buf.len() <= 1 || buf[0] != 0 { - Err(DecodeError::Msg("Invalid CID".into())) - } else { - buf.remove(0); - visitor.visit_byte_buf(buf) - } - } - } - } - _ => Err(DecodeError::Unsupported { byte }), - } - } - - fn deserialize_newtype_struct>( - self, - name: &str, - visitor: V, - ) -> Result { - if name == CID_SERDE_PRIVATE_IDENTIFIER { - self.deserialize_bytes(visitor) - } else { - Err(de::Error::custom([ - "This deserializer must not be called on newtype structs other than one named `", - CID_SERDE_PRIVATE_IDENTIFIER, - "`" - ].concat())) - } - } - - serde::forward_to_deserialize_any! { - bool byte_buf char enum f32 f64 i8 i16 i32 i64 identifier ignored_any map option seq str - string struct tuple tuple_struct u8 u16 u32 u64 unit unit_struct - } -} - -/// Check if byte is a major type with indefinite length. -#[inline] -pub fn is_indefinite(byte: u8) -> bool { - byte & marker::START == marker::START -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/error.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/error.rs deleted file mode 100644 index d6a1d9f20b42..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/error.rs +++ /dev/null @@ -1,198 +0,0 @@ -//! When serializing or deserializing DAG-CBOR goes wrong. - -use core::fmt; -use core::num::TryFromIntError; - -#[cfg(not(feature = "std"))] -use alloc::string::{String, ToString}; - -use serde::{de, ser}; - -/// An encoding error. -#[derive(Debug)] -pub enum EncodeError { - /// Custom error message. - Msg(String), - /// IO Error. - Write(E), -} - -impl From for EncodeError { - fn from(err: E) -> EncodeError { - EncodeError::Write(err) - } -} - -#[cfg(feature = "std")] -impl ser::Error for EncodeError { - fn custom(msg: T) -> Self { - EncodeError::Msg(msg.to_string()) - } -} - -#[cfg(not(feature = "std"))] -impl ser::Error for EncodeError { - fn custom(msg: T) -> Self { - EncodeError::Msg(msg.to_string()) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for EncodeError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - EncodeError::Msg(_) => None, - EncodeError::Write(err) => Some(err), - } - } -} - -#[cfg(not(feature = "std"))] -impl ser::StdError for EncodeError {} - -impl fmt::Display for EncodeError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl From> for EncodeError { - fn from(err: cbor4ii::EncodeError) -> EncodeError { - match err { - cbor4ii::EncodeError::Write(e) => EncodeError::Write(e), - // Needed as `cbor4ii::EncodeError` is markes as non_exhaustive - _ => EncodeError::Msg(err.to_string()), - } - } -} - -/// A decoding error. 
-#[derive(Debug)] -pub enum DecodeError { - /// Custom error message. - Msg(String), - /// IO error. - Read(E), - /// End of file. - Eof, - /// Unexpected byte. - Mismatch { - /// Expected CBOR major type. - expect_major: u8, - /// Unexpected byte. - byte: u8, - }, - /// Unexpected type. - TypeMismatch { - /// Type name. - name: &'static str, - /// Type byte. - byte: u8, - }, - /// Too large integer. - CastOverflow(TryFromIntError), - /// Overflowing 128-bit integers. - Overflow { - /// Type of integer. - name: &'static str, - }, - /// Decoding bytes/strings might require a borrow. - RequireBorrowed { - /// Type name (e.g. "bytes", "str"). - name: &'static str, - }, - /// Length wasn't large enough. - RequireLength { - /// Type name. - name: &'static str, - /// Required length. - expect: usize, - /// Given length. - value: usize, - }, - /// Invalid UTF-8. - InvalidUtf8(core::str::Utf8Error), - /// Unsupported byte. - Unsupported { - /// Unsupported bute. - byte: u8, - }, - /// Recursion limit reached. - DepthLimit, - /// Trailing data. - TrailingData, - /// Indefinite sized item was encountered. - IndefiniteSize, -} - -impl From for DecodeError { - fn from(err: E) -> DecodeError { - DecodeError::Read(err) - } -} - -#[cfg(feature = "std")] -impl de::Error for DecodeError { - fn custom(msg: T) -> Self { - DecodeError::Msg(msg.to_string()) - } -} - -#[cfg(not(feature = "std"))] -impl de::Error for DecodeError { - fn custom(msg: T) -> Self { - DecodeError::Msg(msg.to_string()) - } -} - -#[cfg(feature = "std")] -impl std::error::Error for DecodeError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - DecodeError::Msg(_) => None, - DecodeError::Read(err) => Some(err), - _ => None, - } - } -} - -#[cfg(not(feature = "std"))] -impl ser::StdError for DecodeError {} - -impl fmt::Display for DecodeError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl From> for DecodeError { - fn from(err: cbor4ii::DecodeError) -> DecodeError { - match err { - cbor4ii::DecodeError::Read(read) => DecodeError::Read(read), - cbor4ii::DecodeError::Eof => DecodeError::Eof, - cbor4ii::DecodeError::Mismatch { expect_major, byte } => { - DecodeError::Mismatch { expect_major, byte } - } - cbor4ii::DecodeError::TypeMismatch { name, byte } => { - DecodeError::TypeMismatch { name, byte } - } - cbor4ii::DecodeError::CastOverflow(overflow) => DecodeError::CastOverflow(overflow), - cbor4ii::DecodeError::Overflow { name } => DecodeError::Overflow { name }, - cbor4ii::DecodeError::RequireBorrowed { name } => DecodeError::RequireBorrowed { name }, - cbor4ii::DecodeError::RequireLength { - name, - expect, - value, - } => DecodeError::RequireLength { - name, - expect, - value, - }, - cbor4ii::DecodeError::InvalidUtf8(invalid) => DecodeError::InvalidUtf8(invalid), - cbor4ii::DecodeError::Unsupported { byte } => DecodeError::Unsupported { byte }, - cbor4ii::DecodeError::DepthLimit => DecodeError::DepthLimit, - // Needed as `cbor4ii::EncodeError` is markes as non_exhaustive - _ => DecodeError::Msg(err.to_string()), - } - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/lib.rs deleted file mode 100644 index ca8b38e505c0..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/lib.rs +++ /dev/null @@ -1,142 +0,0 @@ -//! DAG-CBOR serialization and deserialization. -//! -//! # Usage -//! -//! 
Add this to your `Cargo.toml`: -//! ```toml -//! [dependencies] -//! serde_ipld_dagcbor = "0.1.0" -//! ``` -//! -//! Storing and loading Rust types is easy and requires only -//! minimal modifications to the program code. -//! -//! ```rust -//! # #[cfg(not(feature = "std"))] -//! # fn main() {} -//! use serde_derive::{Deserialize, Serialize}; -//! use std::error::Error; -//! use std::fs::File; -//! use std::io::BufReader; -//! -//! // Types annotated with `Serialize` can be stored as CBOR. -//! // To be able to load them again add `Deserialize`. -//! #[derive(Debug, Serialize, Deserialize)] -//! struct Mascot { -//! name: String, -//! species: String, -//! year_of_birth: u32, -//! } -//! -//! # #[cfg(feature = "std")] -//! fn main() -> Result<(), Box> { -//! let ferris = Mascot { -//! name: "Ferris".to_owned(), -//! species: "crab".to_owned(), -//! year_of_birth: 2015, -//! }; -//! -//! let ferris_file = File::create("examples/ferris.cbor")?; -//! // Write Ferris to the given file. -//! // Instead of a file you can use any type that implements `io::Write` -//! // like a HTTP body, database connection etc. -//! serde_ipld_dagcbor::to_writer(ferris_file, &ferris)?; -//! -//! let tux_file = File::open("examples/tux.cbor")?; -//! let tux_reader = BufReader::new(tux_file); -//! // Load Tux from a file. -//! // Serde CBOR performs roundtrip serialization meaning that -//! // the data will not change in any way. -//! let tux: Mascot = serde_ipld_dagcbor::from_reader(tux_reader)?; -//! -//! println!("{:?}", tux); -//! // prints: Mascot { name: "Tux", species: "penguin", year_of_birth: 1996 } -//! -//! Ok(()) -//! } -//! ``` -//! -//! There are a lot of options available to customize the format. -//! To operate on untyped DAG-CBOR values have a look at the [`libipld_core::ipld::Ipld`] type. -//! -//! # Type-based Serialization and Deserialization -//! Serde provides a mechanism for low boilerplate serialization & deserialization of values to and -//! from CBOR via the serialization API. To be able to serialize a piece of data, it must implement -//! the `serde::Serialize` trait. To be able to deserialize a piece of data, it must implement the -//! `serde::Deserialize` trait. Serde provides an annotation to automatically generate the -//! code for these traits: `#[derive(Serialize, Deserialize)]`. -//! -//! Read a general CBOR value with an unknown content. -//! -//! ```rust -//! use serde_ipld_dagcbor::from_slice; -//! use libipld_core::ipld::Ipld; -//! -//! let slice = b"\x82\x01\xa1aaab"; -//! let value: Ipld = from_slice(slice).unwrap(); -//! println!("{:?}", value); // List([Integer(1), Map({"a": String("b")})]) -//! ``` -//! -//! Serialize an object. -//! -//! ```rust -//! use std::collections::BTreeMap; -//! use serde_ipld_dagcbor::to_vec; -//! -//! let mut programming_languages = BTreeMap::new(); -//! programming_languages.insert("rust", vec!["safe", "concurrent", "fast"]); -//! programming_languages.insert("python", vec!["powerful", "friendly", "open"]); -//! programming_languages.insert("js", vec!["lightweight", "interpreted", "object-oriented"]); -//! let encoded = to_vec(&programming_languages); -//! assert_eq!(encoded.unwrap().len(), 103); -//! ``` -//! -//! # `no-std` support -//! -//! Serde CBOR supports building in a `no_std` context, use the following lines -//! in your `Cargo.toml` dependencies: -//! ``` toml -//! [dependencies] -//! serde = { version = "1.0", default-features = false } -//! serde_ipld_dagcbor = { version = "0.1.0", default-features = false } -//! ``` -//! -//! 
Without the `std` feature the functions [from_reader], and [to_writer] are not exported. -//! -//! *Note*: to use derive macros in serde you will need to declare `serde` -//! dependency like so: -//! ``` toml -//! serde = { version = "1.0", default-features = false, features = ["derive"] } -//! ``` - -#![deny(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(not(feature = "std"))] -extern crate alloc; - -mod cbor4ii_nonpub; -pub mod de; -pub mod error; -pub mod ser; - -#[doc(inline)] -pub use crate::error::{DecodeError, EncodeError}; - -// Convenience functions for serialization and deserialization. -#[doc(inline)] -pub use crate::de::from_slice; - -#[cfg(feature = "std")] -#[doc(inline)] -pub use crate::de::from_reader; - -#[doc(inline)] -pub use crate::ser::to_vec; - -#[cfg(feature = "std")] -#[doc(inline)] -pub use crate::ser::to_writer; - -/// The CBOR tag that is used for CIDs. -const CBOR_TAGS_CID: u64 = 42; diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/ser.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/ser.rs deleted file mode 100644 index cf201ffec93f..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/ser.rs +++ /dev/null @@ -1,650 +0,0 @@ -//! Serialization. -#[cfg(not(feature = "std"))] -use alloc::collections::TryReserveError; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -#[cfg(feature = "std")] -use std::collections::TryReserveError; - -pub use cbor4ii::core::utils::BufWriter; -#[cfg(feature = "std")] -use cbor4ii::core::utils::IoWriter; -use cbor4ii::core::{ - enc::{self, Encode}, - types, -}; -use cid::serde::CID_SERDE_PRIVATE_IDENTIFIER; -use serde::{ser, Serialize}; - -use crate::error::EncodeError; -use crate::CBOR_TAGS_CID; - -/// Serializes a value to a vector. -pub fn to_vec(value: &T) -> Result, EncodeError> -where - T: Serialize + ?Sized, -{ - let writer = BufWriter::new(Vec::new()); - let mut serializer = Serializer::new(writer); - value.serialize(&mut serializer)?; - Ok(serializer.into_inner().into_inner()) -} - -/// Serializes a value to a writer. -#[cfg(feature = "std")] -pub fn to_writer(writer: W, value: &T) -> Result<(), EncodeError> -where - W: std::io::Write, - T: Serialize, -{ - let mut serializer = Serializer::new(IoWriter::new(writer)); - value.serialize(&mut serializer) -} - -/// A structure for serializing Rust values to DAG-CBOR. -struct Serializer { - writer: W, -} - -impl Serializer { - /// Creates a new CBOR serializer. - pub fn new(writer: W) -> Serializer { - Serializer { writer } - } - - /// Returns the underlying writer. 
- pub fn into_inner(self) -> W { - self.writer - } -} - -impl<'a, W: enc::Write> serde::Serializer for &'a mut Serializer { - type Ok = (); - type Error = EncodeError; - - type SerializeSeq = Collect<'a, W>; - type SerializeTuple = BoundedCollect<'a, W>; - type SerializeTupleStruct = BoundedCollect<'a, W>; - type SerializeTupleVariant = BoundedCollect<'a, W>; - type SerializeMap = Collect<'a, W>; - type SerializeStruct = BoundedCollect<'a, W>; - type SerializeStructVariant = BoundedCollect<'a, W>; - - #[inline] - fn serialize_bool(self, v: bool) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_i8(self, v: i8) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_i16(self, v: i16) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_i32(self, v: i32) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_i64(self, v: i64) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_u8(self, v: u8) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_u16(self, v: u16) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_u32(self, v: u32) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_u64(self, v: u64) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_f32(self, v: f32) -> Result { - // In DAG-CBOR floats are always encoded as f64. - self.serialize_f64(f64::from(v)) - } - - #[inline] - fn serialize_f64(self, v: f64) -> Result { - // In DAG-CBOR only finite floats are supported. - if !v.is_finite() { - Err(EncodeError::Msg( - "Float must be a finite number, not Infinity or NaN".into(), - )) - } else { - v.encode(&mut self.writer)?; - Ok(()) - } - } - - #[inline] - fn serialize_char(self, v: char) -> Result { - let mut buf = [0; 4]; - self.serialize_str(v.encode_utf8(&mut buf)) - } - - #[inline] - fn serialize_str(self, v: &str) -> Result { - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_bytes(self, v: &[u8]) -> Result { - types::Bytes(v).encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_none(self) -> Result { - types::Null.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_some(self, value: &T) -> Result { - value.serialize(self) - } - - #[inline] - fn serialize_unit(self) -> Result { - // The cbor4ii Serde implementation encodes unit as an empty array, for DAG-CBOR we encode - // it as `NULL`. 
- types::Null.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_unit_struct(self, _name: &'static str) -> Result { - self.serialize_unit() - } - - #[inline] - fn serialize_unit_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - ) -> Result { - self.serialize_str(variant) - } - - #[inline] - fn serialize_newtype_struct( - self, - name: &'static str, - value: &T, - ) -> Result { - if name == CID_SERDE_PRIVATE_IDENTIFIER { - value.serialize(&mut CidSerializer(self)) - } else { - value.serialize(self) - } - } - - #[inline] - fn serialize_newtype_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - value: &T, - ) -> Result { - enc::MapStartBounded(1).encode(&mut self.writer)?; - variant.encode(&mut self.writer)?; - value.serialize(self) - } - - #[inline] - fn serialize_seq(self, len: Option) -> Result { - if let Some(len) = len { - enc::ArrayStartBounded(len).encode(&mut self.writer)?; - } else { - enc::ArrayStartUnbounded.encode(&mut self.writer)?; - } - Ok(Collect { - bounded: len.is_some(), - ser: self, - }) - } - - #[inline] - fn serialize_tuple(self, len: usize) -> Result { - enc::ArrayStartBounded(len).encode(&mut self.writer)?; - Ok(BoundedCollect { ser: self }) - } - - #[inline] - fn serialize_tuple_struct( - self, - _name: &'static str, - len: usize, - ) -> Result { - self.serialize_tuple(len) - } - - #[inline] - fn serialize_tuple_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - len: usize, - ) -> Result { - enc::MapStartBounded(1).encode(&mut self.writer)?; - variant.encode(&mut self.writer)?; - enc::ArrayStartBounded(len).encode(&mut self.writer)?; - Ok(BoundedCollect { ser: self }) - } - - #[inline] - fn serialize_map(self, len: Option) -> Result { - if let Some(len) = len { - enc::MapStartBounded(len).encode(&mut self.writer)?; - } else { - enc::MapStartUnbounded.encode(&mut self.writer)?; - } - Ok(Collect { - bounded: len.is_some(), - ser: self, - }) - } - - #[inline] - fn serialize_struct( - self, - _name: &'static str, - len: usize, - ) -> Result { - enc::MapStartBounded(len).encode(&mut self.writer)?; - Ok(BoundedCollect { ser: self }) - } - - #[inline] - fn serialize_struct_variant( - self, - _name: &'static str, - _variant_index: u32, - variant: &'static str, - len: usize, - ) -> Result { - enc::MapStartBounded(1).encode(&mut self.writer)?; - variant.encode(&mut self.writer)?; - enc::MapStartBounded(len).encode(&mut self.writer)?; - Ok(BoundedCollect { ser: self }) - } - - #[inline] - fn serialize_i128(self, v: i128) -> Result { - if !(u64::MAX as i128 >= v && -(u64::MAX as i128 + 1) <= v) { - return Err(EncodeError::Msg( - "Integer must be within [-u64::MAX-1, u64::MAX] range".into(), - )); - } - - v.encode(&mut self.writer)?; - Ok(()) - } - - #[inline] - fn serialize_u128(self, v: u128) -> Result { - if (u64::MAX as u128) < v { - return Err(EncodeError::Msg( - "Unsigned integer must be within [0, u64::MAX] range".into(), - )); - } - v.encode(&mut self.writer)?; - Ok(()) - } - - fn collect_map(self, iter: I) -> Result<(), Self::Error> - where - K: ser::Serialize, - V: ser::Serialize, - I: IntoIterator, - { - // CBOR RFC-7049 specifies a canonical sort order, where keys are sorted by length first. - // This was later revised with RFC-8949, but we need to stick to the original order to stay - // compatible with existing data. - // We first serialize each map entry into a buffer and then sort those buffers. 
Byte-wise - // comparison gives us the right order as keys in DAG-CBOR are always strings and prefixed - // with the length. Once sorted they are written to the actual output. - let mut buffer = BufWriter::new(Vec::new()); - let mut entries = Vec::new(); - for (key, value) in iter { - let mut mem_serializer = Serializer::new(&mut buffer); - key.serialize(&mut mem_serializer) - .map_err(|_| EncodeError::Msg("Map key cannot be serialized.".into()))?; - value - .serialize(&mut mem_serializer) - .map_err(|_| EncodeError::Msg("Map key cannot be serialized.".into()))?; - entries.push(buffer.buffer().to_vec()); - buffer.clear(); - } - - enc::MapStartBounded(entries.len()).encode(&mut self.writer)?; - entries.sort_unstable(); - for entry in entries { - self.writer.push(&entry)?; - } - - Ok(()) - } - - #[inline] - fn is_human_readable(&self) -> bool { - false - } -} - -struct Collect<'a, W> { - bounded: bool, - ser: &'a mut Serializer, -} - -struct BoundedCollect<'a, W> { - ser: &'a mut Serializer, -} - -impl serde::ser::SerializeSeq for Collect<'_, W> { - type Ok = (); - type Error = EncodeError; - - #[inline] - fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> { - value.serialize(&mut *self.ser) - } - - #[inline] - fn end(self) -> Result { - if !self.bounded { - enc::End.encode(&mut self.ser.writer)?; - } - - Ok(()) - } -} - -impl serde::ser::SerializeTuple for BoundedCollect<'_, W> { - type Ok = (); - type Error = EncodeError; - - #[inline] - fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> { - value.serialize(&mut *self.ser) - } - - #[inline] - fn end(self) -> Result { - Ok(()) - } -} - -impl serde::ser::SerializeTupleStruct for BoundedCollect<'_, W> { - type Ok = (); - type Error = EncodeError; - - #[inline] - fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> { - value.serialize(&mut *self.ser) - } - - #[inline] - fn end(self) -> Result { - Ok(()) - } -} - -impl serde::ser::SerializeTupleVariant for BoundedCollect<'_, W> { - type Ok = (); - type Error = EncodeError; - - #[inline] - fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> { - value.serialize(&mut *self.ser) - } - - #[inline] - fn end(self) -> Result { - Ok(()) - } -} - -impl serde::ser::SerializeMap for Collect<'_, W> { - type Ok = (); - type Error = EncodeError; - - #[inline] - fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error> { - key.serialize(&mut *self.ser) - } - - #[inline] - fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error> { - value.serialize(&mut *self.ser) - } - - #[inline] - fn end(self) -> Result { - if !self.bounded { - enc::End.encode(&mut self.ser.writer)?; - } - - Ok(()) - } -} - -impl serde::ser::SerializeStruct for BoundedCollect<'_, W> { - type Ok = (); - type Error = EncodeError; - - #[inline] - fn serialize_field( - &mut self, - key: &'static str, - value: &T, - ) -> Result<(), Self::Error> { - key.serialize(&mut *self.ser)?; - value.serialize(&mut *self.ser) - } - - #[inline] - fn end(self) -> Result { - Ok(()) - } -} - -impl serde::ser::SerializeStructVariant for BoundedCollect<'_, W> { - type Ok = (); - type Error = EncodeError; - - #[inline] - fn serialize_field( - &mut self, - key: &'static str, - value: &T, - ) -> Result<(), Self::Error> { - key.serialize(&mut *self.ser)?; - value.serialize(&mut *self.ser) - } - - #[inline] - fn end(self) -> Result { - Ok(()) - } -} - -/// Serializing a CID correctly as DAG-CBOR. 
-struct CidSerializer<'a, W>(&'a mut Serializer); - -impl<'a, W: enc::Write> ser::Serializer for &'a mut CidSerializer<'a, W> -where - W::Error: core::fmt::Debug, -{ - type Ok = (); - type Error = EncodeError; - - type SerializeSeq = ser::Impossible; - type SerializeTuple = ser::Impossible; - type SerializeTupleStruct = ser::Impossible; - type SerializeTupleVariant = ser::Impossible; - type SerializeMap = ser::Impossible; - type SerializeStruct = ser::Impossible; - type SerializeStructVariant = ser::Impossible; - - fn serialize_bool(self, _value: bool) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_i8(self, _value: i8) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_i16(self, _value: i16) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_i32(self, _value: i32) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_i64(self, _value: i64) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_u8(self, _value: u8) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_u16(self, _value: u16) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_u32(self, _value: u32) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_u64(self, _value: u64) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_f32(self, _value: f32) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_f64(self, _value: f64) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_char(self, _value: char) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_str(self, _value: &str) -> Result { - Err(ser::Error::custom("unreachable")) - } - - fn serialize_bytes(self, value: &[u8]) -> Result { - // The bytes of the CID is prefixed with a null byte when encoded as CBOR. - let prefixed = [&[0x00], value].concat(); - // CIDs are serialized with CBOR tag 42. 
- types::Tag(CBOR_TAGS_CID, types::Bytes(&prefixed[..])).encode(&mut self.0.writer)?; - Ok(()) - } - - fn serialize_none(self) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_some( - self, - _value: &T, - ) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_unit(self) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_unit_struct(self, _name: &str) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_unit_variant( - self, - _name: &str, - _variant_index: u32, - _variant: &str, - ) -> Result { - Err(ser::Error::custom("unreachable")) - } - - fn serialize_newtype_struct( - self, - _name: &str, - _value: &T, - ) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_newtype_variant( - self, - _name: &str, - _variant_index: u32, - _variant: &str, - _value: &T, - ) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_seq(self, _len: Option) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_tuple(self, _len: usize) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_tuple_struct( - self, - _name: &str, - _len: usize, - ) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_tuple_variant( - self, - _name: &str, - _variant_index: u32, - _variant: &str, - _len: usize, - ) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_map(self, _len: Option) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_struct( - self, - _name: &str, - _len: usize, - ) -> Result { - Err(ser::Error::custom("unreachable")) - } - fn serialize_struct_variant( - self, - _name: &str, - _variant_index: u32, - _variant: &str, - _len: usize, - ) -> Result { - Err(ser::Error::custom("unreachable")) - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/cid.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/cid.rs deleted file mode 100644 index 7369b7afc123..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/cid.rs +++ /dev/null @@ -1,268 +0,0 @@ -use std::convert::{TryFrom, TryInto}; -use std::io::Cursor; -use std::str::FromStr; - -use cid::Cid; -use libipld_core::ipld::Ipld; -use serde::de; -use serde::{Deserialize, Serialize}; -use serde_bytes::ByteBuf; -use serde_ipld_dagcbor::{from_reader, from_slice, to_vec}; - -#[test] -fn test_cid_struct() { - #[derive(Debug, PartialEq, Deserialize, Serialize)] - struct MyStruct { - cid: Cid, - data: bool, - } - - let cid = Cid::from_str("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap(); - let cid_encoded = to_vec(&cid).unwrap(); - assert_eq!( - cid_encoded, - [ - 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, - 0xff, 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, - 0x70, 0x64, 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, - ] - ); - - let cid_decoded_as_cid: Cid = from_slice(&cid_encoded).unwrap(); - assert_eq!(cid_decoded_as_cid, cid); - - let cid_decoded_as_ipld: Ipld = from_slice(&cid_encoded).unwrap(); - assert_eq!(cid_decoded_as_ipld, Ipld::Link(cid)); - - // Tests with the Type nested in a struct - - let mystruct = MyStruct { cid, data: true }; - let mystruct_encoded = to_vec(&mystruct).unwrap(); - assert_eq!( - mystruct_encoded, - [ - 0xa2, 0x63, 0x63, 0x69, 0x64, 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, - 0x2c, 0x26, 0xb4, 0x6b, 0x68, 
0xff, 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, - 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, - 0x62, 0x66, 0xe7, 0xae, 0x64, 0x64, 0x61, 0x74, 0x61, 0xf5 - ] - ); - - let mystruct_decoded_as_mystruct: MyStruct = from_slice(&mystruct_encoded).unwrap(); - assert_eq!(mystruct_decoded_as_mystruct, mystruct); - - let mystruct_decoded_as_ipld: Ipld = from_slice(&mystruct_encoded).unwrap(); - let mut expected_map = std::collections::BTreeMap::new(); - expected_map.insert("cid".to_string(), Ipld::Link(cid)); - expected_map.insert("data".to_string(), Ipld::Bool(true)); - assert_eq!(mystruct_decoded_as_ipld, Ipld::Map(expected_map)); -} - -/// Test that arbitrary bytes are not interpreted as CID. -#[test] -fn test_binary_not_as_cid() { - // h'affe' - // 42 # bytes(2) - // AFFE # "\xAF\xFE" - let bytes = [0x42, 0xaf, 0xfe]; - let bytes_as_ipld: Ipld = from_slice(&bytes).unwrap(); - assert_eq!(bytes_as_ipld, Ipld::Bytes(vec![0xaf, 0xfe])); -} - -/// Test that CIDs don't decode into byte buffers, lists, etc. -#[test] -fn test_cid_not_as_bytes() { - let cbor_cid = [ - 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, - 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, - 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, - ]; - from_slice::>(&cbor_cid).expect_err("shouldn't have parsed a tagged CID as a sequence"); - from_slice::(&cbor_cid) - .expect_err("shouldn't have parsed a tagged CID as a byte array"); - from_slice::(&cbor_cid[2..]) - .expect("should have parsed an untagged CID as a byte array"); -} - -/// Test whether a binary CID could be serialized if it isn't prefixed by tag 42. It should fail. -#[test] -fn test_cid_bytes_without_tag() { - let cbor_cid = [ - 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, - 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, - 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, - ]; - let decoded_cbor_cid: Cid = from_slice(&cbor_cid).unwrap(); - assert_eq!(decoded_cbor_cid.to_bytes(), &cbor_cid[5..]); - - // The CID without the tag 42 prefix - let cbor_bytes = &cbor_cid[2..]; - from_slice::(cbor_bytes).expect_err("should have failed to decode bytes as cid"); -} - -/// This test shows how a kinded enum could be implemented. 
-#[test] -fn test_cid_in_kinded_enum() { - #[derive(Debug, PartialEq)] - pub enum Kinded { - Bytes(ByteBuf), - Link(Cid), - } - - let cbor_cid = [ - 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, - 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, - 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, - ]; - - impl TryFrom for Kinded { - type Error = (); - - fn try_from(ipld: Ipld) -> Result { - match ipld { - Ipld::Bytes(bytes) => Ok(Self::Bytes(ByteBuf::from(bytes))), - Ipld::Link(cid) => Ok(Self::Link(cid)), - _ => Err(()), - } - } - } - - impl<'de> de::Deserialize<'de> for Kinded { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - Ipld::deserialize(deserializer).and_then(|ipld| { - ipld.try_into() - .map_err(|_| de::Error::custom("No matching enum variant found")) - }) - } - } - - let decoded_cid: Kinded = from_slice(&cbor_cid).unwrap(); - let cid = Cid::try_from(&cbor_cid[5..]).unwrap(); - assert_eq!(decoded_cid, Kinded::Link(cid)); - - // The CID without the tag 42 prefix - let cbor_bytes = &cbor_cid[2..]; - let decoded_bytes: Kinded = from_slice(cbor_bytes).unwrap(); - // The CBOR decoded bytes don't contain the prefix with the bytes type identifier and the - // length. - let bytes = cbor_bytes[2..].to_vec(); - assert_eq!(decoded_bytes, Kinded::Bytes(ByteBuf::from(bytes))); - - // Check that random bytes cannot be deserialized. - let random_bytes = &cbor_cid[10..]; - let decoded_random_bytes: Result = from_slice(random_bytes); - assert!(decoded_random_bytes.is_err()); -} - -/// This test shows how a kinded enum could be implemented, when bytes as well as a CID are wrapped -/// in a newtype struct. -#[test] -fn test_cid_in_kinded_enum_with_newtype() { - #[derive(Debug, Deserialize, PartialEq)] - pub struct Foo(#[serde(with = "serde_bytes")] Vec); - - #[derive(Debug, PartialEq)] - pub enum Kinded { - MyBytes(Foo), - Link(Cid), - } - - let cbor_cid = [ - 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, - 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, - 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, - ]; - - impl TryFrom for Kinded { - type Error = (); - - fn try_from(ipld: Ipld) -> Result { - match ipld { - Ipld::Bytes(bytes) => Ok(Self::MyBytes(Foo(bytes))), - Ipld::Link(cid) => Ok(Self::Link(cid)), - _ => Err(()), - } - } - } - - impl<'de> de::Deserialize<'de> for Kinded { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - Ipld::deserialize(deserializer).and_then(|ipld| { - ipld.try_into() - .map_err(|_| de::Error::custom("No matching enum variant found")) - }) - } - } - - let decoded_cid: Kinded = from_slice(&cbor_cid).unwrap(); - // The actual CID is without the CBOR tag 42, the byte identifier and the data length. - let cid = Cid::try_from(&cbor_cid[5..]).unwrap(); - assert_eq!(decoded_cid, Kinded::Link(cid)); - - // The CID without the tag 42 prefix - let cbor_bytes = &cbor_cid[2..]; - let decoded_bytes: Kinded = from_slice(cbor_bytes).unwrap(); - // The CBOR decoded bytes don't contain the prefix with the bytes type identifier and the - // length. - let bytes = cbor_bytes[2..].to_vec(); - assert_eq!(decoded_bytes, Kinded::MyBytes(Foo(bytes))); - - // Check that random bytes cannot be deserialized. 
- let random_bytes = &cbor_cid[10..]; - let decoded_random_bytes: Result = from_slice(random_bytes); - assert!(decoded_random_bytes.is_err()); -} - -#[test] -fn test_cid_empty_errors() { - // Tag 42 with zero bytes - let cbor_empty_cid = [0xd8, 0x2a, 0x40]; - - let decoded: Result = from_slice(&cbor_empty_cid); - assert!(decoded.is_err()); -} - -#[test] -fn test_cid_non_minimally_encoded() { - let cid = Cid::from_str("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap(); - let cid_encoded = to_vec(&cid).unwrap(); - - let decoded: Cid = from_slice(&cid_encoded).unwrap(); - assert_eq!(decoded, cid); - - // Strip off the CBOR tag. - let without_tag = &cid_encoded[2..]; - - let tag_2_bytes_encoded = [&[0xd9, 0x00, 0x2a], without_tag].concat(); - let tag_2_bytes_decoded: Cid = from_slice(&tag_2_bytes_encoded).unwrap(); - assert_eq!(tag_2_bytes_decoded, cid); - - let tag_4_bytes_encoded = [&[0xda, 0x00, 0x00, 0x00, 0x2a], without_tag].concat(); - let tag_4_bytes_decoded: Cid = from_slice(&tag_4_bytes_encoded).unwrap(); - assert_eq!(tag_4_bytes_decoded, cid); - - let tag_8_bytes_encoded = [ - &[0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a], - without_tag, - ] - .concat(); - let tag_8_bytes_decoded: Cid = from_slice(&tag_8_bytes_encoded).unwrap(); - assert_eq!(tag_8_bytes_decoded, cid); -} - -#[test] -fn test_cid_decode_from_reader() { - let cid_encoded = [ - 0xd8, 0x2a, 0x49, 0x00, 0x01, 0xce, 0x01, 0x9b, 0x01, 0x02, 0x63, 0xc8, - ]; - println!("vmx: cid: {:?}", cid_encoded); - let cid_decoded: Cid = from_reader(Cursor::new(&cid_encoded)).unwrap(); - println!("vmx: cid: {:?}", cid_decoded); - assert_eq!(&cid_encoded[4..], &cid_decoded.to_bytes()); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/de.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/de.rs deleted file mode 100644 index ef49b7cdbf44..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/de.rs +++ /dev/null @@ -1,316 +0,0 @@ -use std::collections::BTreeMap; - -use libipld_core::ipld::Ipld; -use serde_ipld_dagcbor::{de, to_vec, DecodeError}; - -#[test] -fn test_string1() { - let ipld: Result = de::from_slice(&[0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]); - assert_eq!(ipld.unwrap(), Ipld::String("foobar".to_string())); -} - -#[test] -fn test_string2() { - let ipld: Result = de::from_slice(&[ - 0x71, 0x49, 0x20, 0x6d, 0x65, 0x74, 0x20, 0x61, 0x20, 0x74, 0x72, 0x61, 0x76, 0x65, 0x6c, - 0x6c, 0x65, 0x72, - ]); - assert_eq!(ipld.unwrap(), Ipld::String("I met a traveller".to_string())); -} - -#[test] -fn test_string3() { - let slice = b"\x78\x2fI met a traveller from an antique land who said"; - let ipld: Result = de::from_slice(slice); - assert_eq!( - ipld.unwrap(), - Ipld::String("I met a traveller from an antique land who said".to_string()) - ); -} - -#[test] -fn test_byte_string() { - let ipld: Result = de::from_slice(&[0x46, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]); - assert_eq!(ipld.unwrap(), Ipld::Bytes(b"foobar".to_vec())); -} - -#[test] -fn test_numbers1() { - let ipld: Result = de::from_slice(&[0x00]); - assert_eq!(ipld.unwrap(), Ipld::Integer(0)); -} - -#[test] -fn test_numbers2() { - let ipld: Result = de::from_slice(&[0x1a, 0x00, 0xbc, 0x61, 0x4e]); - assert_eq!(ipld.unwrap(), Ipld::Integer(12345678)); -} - -#[test] -fn test_numbers3() { - let ipld: Result = de::from_slice(&[0x39, 0x07, 0xde]); - assert_eq!(ipld.unwrap(), Ipld::Integer(-2015)); -} - -#[test] -fn test_numbers_large_negative() { - let 
ipld: Result = - de::from_slice(&[0x3b, 0xa5, 0xf7, 0x02, 0xb3, 0xa5, 0xf7, 0x02, 0xb3]); - let expected: i128 = -11959030306112471732; - assert!(expected < i128::from(i64::MIN)); - assert_eq!(ipld.unwrap(), Ipld::Integer(expected)); -} - -#[test] -fn test_bool() { - let ipld: Result = de::from_slice(b"\xf4"); - assert_eq!(ipld.unwrap(), Ipld::Bool(false)); -} - -#[test] -fn test_trailing_bytes() { - let ipld: Result = de::from_slice(b"\xf4trailing"); - assert!(matches!(ipld.unwrap_err(), DecodeError::TrailingData)); -} - -#[test] -fn test_list1() { - let ipld: Result = de::from_slice(b"\x83\x01\x02\x03"); - assert_eq!( - ipld.unwrap(), - Ipld::List(vec![Ipld::Integer(1), Ipld::Integer(2), Ipld::Integer(3)]) - ); -} - -#[test] -fn test_list2() { - let ipld: Result = de::from_slice(b"\x82\x01\x82\x02\x81\x03"); - assert_eq!( - ipld.unwrap(), - Ipld::List(vec![ - Ipld::Integer(1), - Ipld::List(vec![Ipld::Integer(2), Ipld::List(vec![Ipld::Integer(3)])]) - ]) - ); -} - -#[test] -fn test_object() { - let ipld: Result = de::from_slice(b"\xa5aaaAabaBacaCadaDaeaE"); - let mut object = BTreeMap::new(); - object.insert("a".to_string(), Ipld::String("A".to_string())); - object.insert("b".to_string(), Ipld::String("B".to_string())); - object.insert("c".to_string(), Ipld::String("C".to_string())); - object.insert("d".to_string(), Ipld::String("D".to_string())); - object.insert("e".to_string(), Ipld::String("E".to_string())); - assert_eq!(ipld.unwrap(), Ipld::Map(object)); -} - -#[test] -fn test_indefinite_object_error() { - let ipld: Result = de::from_slice(b"\xbfaa\x01ab\x9f\x02\x03\xff\xff"); - let mut object = BTreeMap::new(); - object.insert("a".to_string(), Ipld::Integer(1)); - object.insert( - "b".to_string(), - Ipld::List(vec![Ipld::Integer(2), Ipld::Integer(3)]), - ); - assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); -} - -#[test] -fn test_indefinite_list_error() { - let ipld: Result = de::from_slice(b"\x9f\x01\x02\x03\xff"); - assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); -} - -#[test] -fn test_indefinite_string_error() { - let ipld: Result = - de::from_slice(b"\x7f\x65Mary \x64Had \x62a \x67Little \x60\x64Lamb\xff"); - assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); -} - -#[test] -fn test_indefinite_byte_string_error() { - let ipld: Result = de::from_slice(b"\x5f\x42\x01\x23\x42\x45\x67\xff"); - assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); -} - -#[test] -fn test_multiple_indefinite_strings_error() { - let input = b"\x82\x7f\x65Mary \x64Had \x62a \x67Little \x60\x64Lamb\xff\x5f\x42\x01\x23\x42\x45\x67\xff"; - let ipld: Result = de::from_slice(input); - assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); -} - -#[test] -fn test_float() { - let ipld: Result = de::from_slice(b"\xfa\x47\xc3\x50\x00"); - assert_eq!(ipld.unwrap(), Ipld::Float(100000.0)); -} - -#[test] -fn test_rejected_tag() { - let ipld: Result = - de::from_slice(&[0xd9, 0xd9, 0xf7, 0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]); - assert!(matches!( - ipld.unwrap_err(), - DecodeError::TypeMismatch { - name: "CBOR tag", - byte: 0xf7 - } - )); -} - -#[test] -fn test_crazy_list() { - let slice = b"\x86\x1b\x00\x00\x00\x1c\xbe\x99\x1d\xc7\x3b\x00\x7a\xcf\x51\xdc\x51\x70\xdb\x3a\x1b\x3a\x06\xdd\xf5\xf6\xfb\x41\x76\x5e\xb1\xf8\x00\x00\x00"; - let ipld: Vec = de::from_slice(slice).unwrap(); - assert_eq!( - ipld, - vec![ - Ipld::Integer(123456789959), - Ipld::Integer(-34567897654325468), - Ipld::Integer(-456787678), - Ipld::Bool(true), - Ipld::Null, 
- Ipld::Float(23456543.5), - ] - ); -} - -#[test] -fn test_nan() { - let ipld: Result = de::from_slice(b"\xf9\x7e\x00"); - assert!(matches!( - ipld.unwrap_err(), - DecodeError::TypeMismatch { .. } - )); -} - -#[test] -// The file was reported as not working by user kie0tauB -// but it parses to a cbor value. -fn test_kietaub_file() { - let file = include_bytes!("kietaub.cbor"); - let value_result: Result = de::from_slice(file); - value_result.unwrap(); -} - -#[test] -fn test_option_roundtrip() { - let obj1 = Some(10u32); - - let v = to_vec(&obj1).unwrap(); - let obj2: Result, _> = de::from_slice(&v[..]); - println!("{:?}", obj2); - - assert_eq!(obj1, obj2.unwrap()); -} - -#[test] -fn test_option_none_roundtrip() { - let obj1 = None; - - let v = to_vec(&obj1).unwrap(); - println!("{:?}", v); - let obj2: Result, _> = de::from_slice(&v[..]); - - assert_eq!(obj1, obj2.unwrap()); -} - -#[test] -fn test_unit() { - #[allow(clippy::let_unit_value)] - let unit = (); - let v = to_vec(&unit).unwrap(); - assert_eq!(v, [0xf6], "unit is serialized as NULL."); - let result: Result<(), _> = from_slice(&v); - assert!(result.is_ok(), "unit was successfully deserialized"); -} - -#[test] -fn test_variable_length_map_error() { - let slice = b"\xbf\x67\x6d\x65\x73\x73\x61\x67\x65\x64\x70\x6f\x6e\x67\xff"; - let ipld: Result = de::from_slice(slice); - assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); -} - -#[test] -fn test_object_determinism_roundtrip() { - let expected = b"\xa2aa\x01ab\x82\x02\x03"; - - // 0.1% chance of not catching failure - for _ in 0..10 { - assert_eq!( - &to_vec(&de::from_slice::(expected).unwrap()).unwrap(), - expected - ); - } -} - -#[test] -fn crash() { - let file = include_bytes!("crash.cbor"); - let value_result: Result = de::from_slice(file); - assert!(matches!(value_result.unwrap_err(), DecodeError::Eof)); -} - -use serde_ipld_dagcbor::de::from_slice; -use std::net::{IpAddr, Ipv4Addr}; -#[test] -fn test_ipaddr_deserialization() { - let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - let buf = to_vec(&ip).unwrap(); - let deserialized_ip = from_slice::(&buf).unwrap(); - assert_eq!(ip, deserialized_ip); -} - -#[test] -fn attempt_stack_overflow() { - // Create a tag 17, followed by 999 more tag 17: - // 17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(... - // This causes deep recursion in the decoder and may - // exhaust the stack and therfore result in a stack overflow. 
- let input = vec![0xd1; 1000]; - serde_ipld_dagcbor::from_slice::(&input).expect_err("recursion limit"); -} - -#[test] -fn truncated_object() { - let input: Vec = [ - &b"\x84\x87\xD8\x2A\x58\x27\x00\x01\x71\xA0\xE4\x02\x20\x83\xEC\x9F\x76\x1D"[..], - &b"\xB5\xEE\xA0\xC8\xE1\xB5\x74\x0D\x1F\x0A\x1D\xB1\x8A\x52\x6B\xCB\x42\x69"[..], - &b"\xFD\x99\x24\x9E\xCE\xA9\xE8\xFD\x24\xD8\x2A\x58\x27\x00\x01\x71\xA0\xE4"[..], - &b"\x02\x20\xF1\x9B\xC1\x42\x83\x31\xB1\x39\xB3\x3F\x43\x02\x87\xCC\x1C\x12"[..], - &b"\xF2\x84\x47\xA3\x9B\x07\x59\x40\x17\x68\xFE\xE8\x09\xBB\xF2\x54\xD8\x2A"[..], - &b"\x58\x27\x00\x01\x71\xA0\xE4\x02\x20\xB0\x75\x09\x92\x78\x6B\x6B\x4C\xED"[..], - &b"\xF0\xE1\x50\xA3\x1C\xAB\xDF\x25\xA9\x26\x8C\x63\xDD\xCB\x25\x73\x6B\xF5"[..], - &b"\x8D\xE8\xA4\x24\x29"[..], - ] - .concat(); - serde_ipld_dagcbor::from_slice::(&input).expect_err("truncated"); -} - -#[test] -fn invalid_string() { - // Non UTF-8 byte sequence, but using major type 3 (text string) - let input = [0x63, 0xc5, 0x01, 0x02]; - let result = serde_ipld_dagcbor::from_slice::(&input); - assert!(matches!( - result.unwrap_err(), - DecodeError::InvalidUtf8 { .. } - )); -} - -#[test] -fn error_on_undefined() { - // CBOR smple type `undefined` - let input = [0xf7]; - let result = serde_ipld_dagcbor::from_slice::(&input); - assert!(matches!( - result.unwrap_err(), - DecodeError::Unsupported { .. } - )); -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/ipld.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/ipld.rs deleted file mode 100644 index da728fffa6ee..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/ipld.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::collections::BTreeMap; - -use libipld_core::ipld::Ipld; -use serde::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -struct TupleStruct(String, i32, u64); - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -struct UnitStruct; - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -struct Struct<'a> { - tuple_struct: TupleStruct, - tuple: (String, f32, f64), - map: BTreeMap, - bytes: &'a [u8], - array: Vec, -} - -use std::iter::FromIterator; - -#[allow(clippy::useless_format)] -#[test] -fn serde() { - let tuple_struct = TupleStruct(format!("test"), -60, 3000); - - let tuple = (format!("hello"), -50.004097, -12.094635556478); - - let map = BTreeMap::from_iter( - [ - (format!("key1"), format!("value1")), - (format!("key2"), format!("value2")), - (format!("key3"), format!("value3")), - (format!("key4"), format!("value4")), - ] - .iter() - .cloned(), - ); - - let bytes = b"test byte string"; - - let array = vec![format!("one"), format!("two"), format!("three")]; - - let data = Struct { - tuple_struct, - tuple, - map, - bytes, - array, - }; - - let ipld = libipld_core::serde::to_ipld(data.clone()).unwrap(); - println!("{:?}", ipld); - - let data_ser = serde_ipld_dagcbor::to_vec(&ipld).unwrap(); - let data_de_ipld: Ipld = serde_ipld_dagcbor::from_slice(&data_ser).unwrap(); - - fn as_object(ipld: &Ipld) -> &BTreeMap { - if let Ipld::Map(ref v) = ipld { - return v; - } - panic!() - } - - for ((k1, v1), (k2, v2)) in as_object(&ipld).iter().zip(as_object(&data_de_ipld).iter()) { - assert_eq!(k1, k2); - assert_eq!(v1, v2); - } - - assert_eq!(ipld, data_de_ipld); -} - -#[test] -fn unit_struct_not_supported() { - let unit_array = vec![UnitStruct, UnitStruct, UnitStruct]; - let ipld = 
libipld_core::serde::to_ipld(unit_array); - assert!(ipld.is_err()); -} - -#[derive(Debug, Deserialize, Serialize)] -struct SmallStruct { - spam: u32, - eggs: u32, -} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/ser.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/ser.rs deleted file mode 100644 index 154b87d6a01a..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/ser.rs +++ /dev/null @@ -1,142 +0,0 @@ -use serde_bytes::{ByteBuf, Bytes}; -use serde_ipld_dagcbor::{from_slice, to_vec}; -use std::collections::BTreeMap; - -#[test] -fn test_string() { - let value = "foobar".to_owned(); - assert_eq!(&to_vec(&value).unwrap()[..], b"ffoobar"); -} - -#[test] -fn test_list() { - let value = vec![1, 2, 3]; - assert_eq!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03"); -} - -#[test] -fn test_object() { - let mut object = BTreeMap::new(); - object.insert("a".to_owned(), "A".to_owned()); - object.insert("b".to_owned(), "B".to_owned()); - object.insert("c".to_owned(), "C".to_owned()); - object.insert("d".to_owned(), "D".to_owned()); - object.insert("e".to_owned(), "E".to_owned()); - let vec = to_vec(&object).unwrap(); - let test_object = from_slice(&vec[..]).unwrap(); - assert_eq!(object, test_object); -} - -#[test] -fn test_float() { - let vec = to_vec(&12.3f64).unwrap(); - assert_eq!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a"); -} - -#[test] -fn test_f32() { - let vec = to_vec(&4000.5f32).unwrap(); - assert_eq!(vec, b"\xfb\x40\xaf\x41\x00\x00\x00\x00\x00"); -} - -#[test] -fn test_infinity() { - let vec = to_vec(&::std::f64::INFINITY); - assert!(vec.is_err(), "Only finite numbers are supported."); -} - -#[test] -fn test_neg_infinity() { - let vec = to_vec(&::std::f64::NEG_INFINITY); - assert!(vec.is_err(), "Only finite numbers are supported."); -} - -#[test] -fn test_nan() { - let vec = to_vec(&::std::f32::NAN); - assert!(vec.is_err(), "Only finite numbers are supported."); -} - -#[test] -fn test_integer() { - // u8 - let vec = to_vec(&24).unwrap(); - assert_eq!(vec, b"\x18\x18"); - // i8 - let vec = to_vec(&-5).unwrap(); - assert_eq!(vec, b"\x24"); - // i16 - let vec = to_vec(&-300).unwrap(); - assert_eq!(vec, b"\x39\x01\x2b"); - // i32 - let vec = to_vec(&-23567997).unwrap(); - assert_eq!(vec, b"\x3a\x01\x67\x9e\x7c"); - // u64 - let vec = to_vec(&::std::u64::MAX).unwrap(); - assert_eq!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff"); - // u128 within u64 range - let vec = to_vec(&(u64::MAX as u128)).unwrap(); - assert_eq!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff"); - // u128 out of range - assert!(to_vec(&(u64::MAX as u128 + 1)).is_err()); - // i128 within u64 range - let vec = to_vec(&(u64::MAX as i128)).unwrap(); - assert_eq!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff"); - // i128 within -u64 range - let vec = to_vec(&(-(u64::MAX as i128))).unwrap(); - assert_eq!(vec, b"\x3B\xff\xff\xff\xff\xff\xff\xff\xfe"); - // minimum CBOR integer value - let vec = to_vec(&(-(u64::MAX as i128 + 1))).unwrap(); - assert_eq!(vec, b"\x3B\xff\xff\xff\xff\xff\xff\xff\xff"); - // i128 out of -u64 range - assert!(to_vec(&i128::MIN).is_err()); -} - -#[test] -fn test_ip_addr() { - use std::net::Ipv4Addr; - - let addr = Ipv4Addr::new(8, 8, 8, 8); - let vec = to_vec(&addr).unwrap(); - println!("{:?}", vec); - assert_eq!(vec.len(), 5); - let test_addr: Ipv4Addr = from_slice(&vec).unwrap(); - assert_eq!(addr, test_addr); -} - -/// Test all of CBOR's fixed-length byte string types -#[test] -fn test_byte_string() { 
- // Very short byte strings have 1-byte headers - let short = serde_bytes::Bytes::new(&[0, 1, 2, 255]); - let short_s = to_vec(&short).unwrap(); - assert_eq!(short_s, [0x44, 0, 1, 2, 255]); - - // byte strings > 23 bytes have 2-byte headers - let medium = Bytes::new(&[ - 0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 255, - ]); - let medium_s = to_vec(&medium).unwrap(); - assert_eq!( - medium_s, - [ - 0x58, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 255 - ] - ); - - // byte strings > 256 bytes have 3-byte headers - let long_vec = ByteBuf::from((0..256).map(|i| (i & 0xFF) as u8).collect::>()); - let long_s = to_vec(&long_vec).unwrap(); - assert_eq!(&long_s[0..3], [0x59, 1, 0]); - assert_eq!(&long_s[3..], &long_vec[..]); - - // byte strings > 2^16 bytes have 5-byte headers - let very_long_vec = ByteBuf::from((0..65536).map(|i| (i & 0xFF) as u8).collect::>()); - let very_long_s = to_vec(&very_long_vec).unwrap(); - assert_eq!(&very_long_s[0..5], [0x5a, 0, 1, 0, 0]); - assert_eq!(&very_long_s[5..], &very_long_vec[..]); - - // byte strings > 2^32 bytes have 9-byte headers, but they take too much RAM - // to test in Travis. -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..fdc61931efcc --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "8930ea80e02d38dc0791fc11ffd93a72f55f17e9" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.editorconfig b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.editorconfig similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.editorconfig rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.editorconfig diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.github/dependabot.yml b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.github/dependabot.yml similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.github/dependabot.yml rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.github/dependabot.yml diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.github/workflows/build.yml b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.github/workflows/build.yml new file mode 100644 index 000000000000..a40afbbcf45c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.github/workflows/build.yml @@ -0,0 +1,53 @@ +name: build + +on: [push, pull_request] + +jobs: + build: + name: Build + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, macos-latest, windows-latest] + toolchain: [stable] + runs-on: ${{ matrix.platform 
}} + + steps: + - name: Checkout Sources + uses: actions/checkout@v4 + + - name: Cache Dependencies & Build Outputs + uses: actions/cache@v4 + with: + path: ~/.cargo + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ matrix.toolchain }} + components: rustfmt, clippy + + - name: Check Code Format + run: cargo fmt --all -- --check + + - name: Code lint with default features + run: cargo clippy --all-targets --workspace -- -D warnings + + - name: Code lint without default features + run: cargo clippy --all-targets --workspace --no-default-features -- -D warnings + + - name: Code lint with all features + run: cargo clippy --all-targets --workspace --all-features -- -D warnings + + - name: Test with default features + run: cargo test --all-targets --workspace + + - name: Test without default features + run: cargo test --all-targets --workspace --no-default-features + + - name: Test with all features + run: cargo test --all-targets --workspace --all-features + + - name: Test no-cid-as-bytes feature + run: cargo test --all-targets --workspace --features no-cid-as-bytes diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.gitignore b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.gitignore similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/.gitignore rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/.gitignore diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/Cargo.toml new file mode 100644 index 000000000000..de4bcdf24ddd --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/Cargo.toml @@ -0,0 +1,75 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "serde_ipld_dagcbor" +version = "0.6.1" +authors = [ + "Pyfisch ", + "Steven Fackler ", + "Volker Mische ", +] +description = "IPLD DAG-CBOR support for Serde." 
+readme = "README.md" +keywords = [ + "serde", + "cbor", + "serialization", + "no_std", +] +categories = ["encoding"] +license = "MIT/Apache-2.0" +repository = "https://github.com/ipld/serde_ipld_dagcbor" + +[dependencies.cbor4ii] +version = "0.2.14" +features = ["use_alloc"] +default-features = false + +[dependencies.ipld-core] +version = "0.4.0" +features = ["serde"] +default-features = false + +[dependencies.scopeguard] +version = "1.1.0" + +[dependencies.serde] +version = "1.0.164" +features = ["alloc"] +default-features = false + +[dev-dependencies.serde-transcode] +version = "1.1.1" + +[dev-dependencies.serde_bytes] +version = "0.11.9" +features = ["alloc"] +default-features = false + +[dev-dependencies.serde_derive] +version = "1.0.164" +default-features = false + +[features] +codec = ["ipld-core/codec"] +default = [ + "codec", + "std", +] +no-cid-as-bytes = [] +std = [ + "cbor4ii/use_std", + "ipld-core/std", + "serde/std", + "serde_bytes/std", +] diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/Cargo.toml.orig new file mode 100644 index 000000000000..dbb23f43ffb4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/Cargo.toml.orig @@ -0,0 +1,34 @@ +[package] +name = "serde_ipld_dagcbor" +version = "0.6.1" +authors = [ + "Pyfisch ", + "Steven Fackler ", + "Volker Mische " +] +repository = "https://github.com/ipld/serde_ipld_dagcbor" +readme = "README.md" +license = "MIT/Apache-2.0" +description = "IPLD DAG-CBOR support for Serde." +keywords = ["serde", "cbor", "serialization", "no_std"] +categories = ["encoding"] +edition = "2018" + +[dependencies] +cbor4ii = { version = "0.2.14", default-features = false, features = ["use_alloc"] } +ipld-core = { version = "0.4.0", default-features = false, features = ["serde"] } +scopeguard = "1.1.0" +serde = { version = "1.0.164", default-features = false, features = ["alloc"] } + +[dev-dependencies] +serde_derive = { version = "1.0.164", default-features = false } +serde_bytes = { version = "0.11.9", default-features = false, features = ["alloc"]} +serde-transcode = "1.1.1" + +[features] +default = ["codec", "std"] +std = ["cbor4ii/use_std", "ipld-core/std", "serde/std", "serde_bytes/std"] +# Enable the `Codec` trait implementation. It's a separate feature as it needs Rust >= 1.75. +codec = ["ipld-core/codec"] +# Prevent deserializing CIDs as bytes as much as possible. +no-cid-as-bytes = [] diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/LICENSE-APACHE new file mode 100644 index 000000000000..16fe87b06e80 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/LICENSE-MIT new file mode 100644 index 000000000000..b1b75fa62cf5 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright (c) 2015 Pyfisch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/README.md b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/README.md new file mode 100644 index 000000000000..c35bf9551b1e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/README.md @@ -0,0 +1,92 @@ +Serde IPLD DAG-CBOR +=================== + +[![Crates.io](https://img.shields.io/crates/v/serde_ipld_dag_cbor.svg)](https://crates.io/crates/serde_ipld_dagcbor) +[![Documentation](https://docs.rs/serde_ipld_dag_cbor/badge.svg)](https://docs.rs/serde_ipld_dag_cbor) + +This is a [Serde] implementation for [DAG-CBOR]. It can be use in conjunction with [ipld-core]. + +The underlying library for CBOR encoding/decoding is [cbor4ii] and the Serde implementation is also heavily based on their code. + +This crate started as a fork of [serde_cbor], thanks everyone involved there. + +[Serde]: https://github.com/serde-rs/serde +[DAG-CBOR]: https://ipld.io/specs/codecs/dag-cbor/spec/ +[ipld-core]: https://github.com/ipld/rust-ipld-core +[cbor4ii]: https://github.com/quininer/cbor4ii +[serde_cbor]: https://github.com/pyfisch/cbor + + +Usage +----- + +Storing and loading Rust types is easy and requires only +minimal modifications to the program code. + +```rust +use serde_derive::{Deserialize, Serialize}; +use std::error::Error; +use std::fs::File; +use std::io::BufReader; + +// Types annotated with `Serialize` can be stored as DAG-CBOR. +// To be able to load them again add `Deserialize`. +#[derive(Debug, Serialize, Deserialize)] +struct Mascot { + name: String, + species: String, + year_of_birth: u32, +} + +fn main() -> Result<(), Box> { + let ferris = Mascot { + name: "Ferris".to_owned(), + species: "crab".to_owned(), + year_of_birth: 2015, + }; + + let ferris_file = File::create("examples/ferris.cbor")?; + // Write Ferris to the given file. + // Instead of a file you can use any type that implements `io::Write` + // like a HTTP body, database connection etc. 
+ serde_ipld_dagcbor::to_writer(ferris_file, &ferris)?; + + let tux_file = File::open("examples/tux.cbor")?; + let tux_reader = BufReader::new(tux_file); + // Load Tux from a file. + // Serde IPLD DAG-CBOR performs roundtrip serialization meaning that + // the data will not change in any way. + let tux: Mascot = serde_ipld_dagcbor::from_reader(tux_reader)?; + + println!("{:?}", tux); + // prints: Mascot { name: "Tux", species: "penguin", year_of_birth: 1996 } + + Ok(()) +} +``` + + +Features +-------- + +### `codec` + +The `codec` feature is enabled by default, it provides the `Codec` trait, which enables encoding and decoding independent of the IPLD Codec. The minimum supported Rust version (MSRV) can significantly be reduced to 1.64 by disabling this feature. + + +### `no-cid-as-bytes` + +Sometimes it is desired that a CID is not accidentally deserialized into bytes. This can happen because the intermediate serde data model does not retain enough information to be able to differentiate between a bytes container and a CID container when there is a conflicting choice to be made, as in the case of some enum cases. The `no-cid-as-bytes` feature can be enabled in order to error at runtime in such cases. + +The problem with that feature is, that it breaks Serde's derive attributes for [internally tagged enums](https://serde.rs/enum-representations.html#internally-tagged) (`#[serde(tag = "sometag")]`) and [untagged enums](https://serde.rs/enum-representations.html#untagged) (`#serde(untagged)`). If this feature is enabled and you still need similar functionality, you could implement a deserializer manually. Examples of how to do that are in the [enum example](examples/enums.rs). + + +License +------- + +Licensed under either of + + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/examples/enums.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/examples/enums.rs new file mode 100644 index 000000000000..7b2a95c4e2b2 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/examples/enums.rs @@ -0,0 +1,138 @@ +/// Serde untagged (`#[serde(untagged)]`) and internaly tagged enums (`#[serde(tag = "tag")]`) are +/// not supported by CIDs. Here examples are provided on how to implement similar behaviour. This +/// file also contains an example for a kinded enum. +use std::convert::{TryFrom, TryInto}; + +use ipld_core::{cid::Cid, ipld::Ipld}; +use serde::{de, Deserialize}; +use serde_bytes::ByteBuf; +use serde_derive::Deserialize; +use serde_ipld_dagcbor::from_slice; + +/// The CID `bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy` encoded as CBOR +/// 42(h'00015512202C26B46B68FFC68FF99B453C1D30413413422D706483BFA0F98A5E886266E7AE') +const CBOR_CID_FIXTURE: [u8; 41] = [ + 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, 0xc6, + 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, 0x83, 0xbf, + 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, +]; + +/// This enum shows how an internally tagged enum could be implemented. 
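Editor's sketch for the `codec` feature the README above describes: it is backed by the new `src/codec.rs` added further down in this patch, which registers DAG-CBOR under multicodec code 0x71. The snippet below is not part of the patch; it only illustrates how a caller would drive that trait, assuming the crate's default `codec`/`std` features and the `ipld-core` 0.4 API the vendored code itself imports.

```rust
use ipld_core::codec::{Codec, Links};
use serde_ipld_dagcbor::codec::DagCborCodec;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let value = vec![1u64, 2, 3];

    // Encode and decode through the multicodec-aware trait (code 0x71 = dag-cbor).
    let mut encoded = Vec::new();
    DagCborCodec::encode(&mut encoded, &value)?;
    let decoded: Vec<u64> = DagCborCodec::decode(&encoded[..])?;
    assert_eq!(decoded, value);

    // `Links` walks an encoded block and extracts referenced CIDs; this one has none.
    assert_eq!(DagCborCodec::links(&encoded)?.count(), 0);
    Ok(())
}
```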
+#[derive(Debug, PartialEq)] +enum CidInInternallyTaggedEnum { + MyCid { cid: Cid }, +} + +// This manual deserializer implementation works as if you would derive `Deserialize` and add +// `#[serde(tag = "type")]` to the `CidInternallyTaggedEnum` enum. +impl<'de> de::Deserialize<'de> for CidInInternallyTaggedEnum { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + #[derive(Deserialize)] + struct Tagged { + r#type: String, + cid: Cid, + } + + let Tagged { r#type, cid } = Deserialize::deserialize(deserializer)?; + if r#type == "MyCid" { + Ok(CidInInternallyTaggedEnum::MyCid { cid }) + } else { + Err(de::Error::custom("No matching enum variant found")) + } + } +} + +/// This enum shows how an untagged enum could be implemented. +#[derive(Debug, PartialEq)] +enum CidInUntaggedEnum { + MyCid(Cid), +} + +// This manual deserializer implementation works as if you would derive `Deserialize` and add +// `#[serde(untagged)]`. +impl<'de> de::Deserialize<'de> for CidInUntaggedEnum { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + Cid::deserialize(deserializer) + .map(CidInUntaggedEnum::MyCid) + .map_err(|_| de::Error::custom("No matching enum variant found")) + } +} + +/// This enum shows how a kinded enum could be implemented. +#[derive(Debug, PartialEq)] +pub enum Kinded { + Bytes(ByteBuf), + Link(Cid), +} + +impl TryFrom for Kinded { + type Error = (); + + fn try_from(ipld: Ipld) -> Result { + match ipld { + Ipld::Bytes(bytes) => Ok(Self::Bytes(ByteBuf::from(bytes))), + Ipld::Link(cid) => Ok(Self::Link(cid)), + _ => Err(()), + } + } +} + +impl<'de> de::Deserialize<'de> for Kinded { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + Ipld::deserialize(deserializer).and_then(|ipld| { + ipld.try_into() + .map_err(|_| de::Error::custom("No matching enum variant found")) + }) + } +} + +pub fn main() { + let cid: Cid = from_slice(&CBOR_CID_FIXTURE).unwrap(); + + // {"type": "MyCid", "cid": 42(h'00015512202C26B46B68FFC68FF99B453C1D30413413422D706483BFA0F98A5E886266E7AE')} + let cbor_internally_tagged_enum = [ + &[ + 0xa2, 0x64, 0x74, 0x79, 0x70, 0x65, 0x65, 0x4d, 0x79, 0x43, 0x69, 0x64, 0x63, 0x63, + 0x69, 0x64, + ], + &CBOR_CID_FIXTURE[..], + ] + .concat(); + assert_eq!( + from_slice::(&cbor_internally_tagged_enum).unwrap(), + CidInInternallyTaggedEnum::MyCid { cid } + ); + + assert_eq!( + from_slice::(&CBOR_CID_FIXTURE).unwrap(), + CidInUntaggedEnum::MyCid(cid) + ); + + assert_eq!( + from_slice::(&CBOR_CID_FIXTURE).unwrap(), + Kinded::Link(cid) + ); + + // The CID without the tag 42 prefix, so that it decodes as just bytes. + let cbor_bytes = &CBOR_CID_FIXTURE[2..]; + let decoded_bytes: Kinded = from_slice(cbor_bytes).unwrap(); + // The CBOR decoded bytes don't contain the prefix with the bytes type identifier and the + // length. + let bytes = cbor_bytes[2..].to_vec(); + assert_eq!(decoded_bytes, Kinded::Bytes(ByteBuf::from(bytes))); +} + +// Make it possible to run this example as test. 
+#[test] +fn test_main() { + main() +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/examples/readme.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/examples/readme.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/examples/readme.rs rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/examples/readme.rs diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/examples/tux.cbor b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/examples/tux.cbor similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/examples/tux.cbor rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/examples/tux.cbor diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/cbor4ii_nonpub.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/cbor4ii_nonpub.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/src/cbor4ii_nonpub.rs rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/cbor4ii_nonpub.rs diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/codec.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/codec.rs new file mode 100644 index 000000000000..9a7c9b02df23 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/codec.rs @@ -0,0 +1,43 @@ +//! Implementation of ipld-core's `Codec` trait. + +use std::io::{BufRead, Write}; + +use ipld_core::{ + cid::Cid, + codec::{Codec, Links}, + serde::ExtractLinks, +}; +use serde::{de::Deserialize, ser::Serialize}; + +use crate::{de::Deserializer, error::CodecError}; + +/// DAG-CBOR implementation of ipld-core's `Codec` trait. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct DagCborCodec; + +impl Codec for DagCborCodec +where + T: for<'a> Deserialize<'a> + Serialize, +{ + const CODE: u64 = 0x71; + type Error = CodecError; + + fn decode(reader: R) -> Result { + Ok(crate::from_reader(reader)?) + } + + fn encode(writer: W, data: &T) -> Result<(), Self::Error> { + Ok(crate::to_writer(writer, data)?) + } +} + +impl Links for DagCborCodec { + type LinksError = CodecError; + + fn links(data: &[u8]) -> Result, Self::LinksError> { + let mut deserializer = Deserializer::from_slice(data); + Ok(ExtractLinks::deserialize(&mut deserializer)? + .into_vec() + .into_iter()) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/de.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/de.rs new file mode 100644 index 000000000000..5e46c1cb1bc8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/de.rs @@ -0,0 +1,695 @@ +//! Deserialization. +#[cfg(not(feature = "std"))] +use alloc::borrow::Cow; +use core::convert::{Infallible, TryFrom}; +#[cfg(feature = "std")] +use std::borrow::Cow; + +use cbor4ii::core::dec::{self, Decode}; +use cbor4ii::core::{major, types, utils::SliceReader}; +use ipld_core::cid::serde::CID_SERDE_PRIVATE_IDENTIFIER; +use serde::de::{self, Visitor}; + +use crate::cbor4ii_nonpub::{marker, peek_one, pull_one}; +use crate::error::DecodeError; +use crate::CBOR_TAGS_CID; +#[cfg(feature = "std")] +use cbor4ii::core::utils::IoReader; + +/// Decodes a value from CBOR data in a slice. 
+/// +/// # Examples +/// +/// Deserialize a `String` +/// +/// ``` +/// # use serde_ipld_dagcbor::de; +/// let v: Vec = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]; +/// let value: String = de::from_slice(&v[..]).unwrap(); +/// assert_eq!(value, "foobar"); +/// ``` +/// +/// Deserialize a borrowed string with zero copies. +/// +/// ``` +/// # use serde_ipld_dagcbor::de; +/// let v: Vec = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]; +/// let value: &str = de::from_slice(&v[..]).unwrap(); +/// assert_eq!(value, "foobar"); +/// ``` +pub fn from_slice<'a, T>(buf: &'a [u8]) -> Result> +where + T: de::Deserialize<'a>, +{ + let reader = SliceReader::new(buf); + let mut deserializer = Deserializer::from_reader(reader); + let value = serde::Deserialize::deserialize(&mut deserializer)?; + deserializer.end()?; + Ok(value) +} + +/// Decodes a value from CBOR data in a reader. +/// +/// # Examples +/// +/// Deserialize a `String` +/// +/// ``` +/// # use serde_ipld_dagcbor::de; +/// let v: Vec = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]; +/// let value: String = de::from_reader(&v[..]).unwrap(); +/// assert_eq!(value, "foobar"); +/// ``` +/// +/// Note that `from_reader` cannot borrow data: +/// +/// ```compile_fail +/// # use serde_ipld_dagcbor::de; +/// let v: Vec = vec![0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]; +/// let value: &str = de::from_reader(&v[..]).unwrap(); +/// assert_eq!(value, "foobar"); +/// ``` +#[cfg(feature = "std")] +pub fn from_reader(reader: R) -> Result> +where + T: de::DeserializeOwned, + R: std::io::BufRead, +{ + let reader = IoReader::new(reader); + let mut deserializer = Deserializer::from_reader(reader); + let value = serde::Deserialize::deserialize(&mut deserializer)?; + deserializer.end()?; + Ok(value) +} + +/// A Serde `Deserialize`r of DAG-CBOR data. +#[derive(Debug)] +pub struct Deserializer { + reader: R, +} + +impl Deserializer { + /// Constructs a `Deserializer` which reads from a `Read`er. + pub fn from_reader(reader: R) -> Deserializer { + Deserializer { reader } + } +} + +impl<'a> Deserializer> { + /// Constructs a `Deserializer` that reads from a slice. + pub fn from_slice(buf: &'a [u8]) -> Self { + Deserializer { + reader: SliceReader::new(buf), + } + } +} + +impl<'de, R: dec::Read<'de>> Deserializer { + #[allow(clippy::type_complexity)] + #[inline] + fn try_step<'a>( + &'a mut self, + ) -> Result ()>, DecodeError> + { + if self.reader.step_in() { + Ok(scopeguard::guard(self, |de| de.reader.step_out())) + } else { + Err(DecodeError::DepthLimit) + } + } + + #[inline] + fn deserialize_cid(&mut self, visitor: V) -> Result> + where + V: Visitor<'de>, + { + let tag = dec::TagStart::decode(&mut self.reader)?; + + match tag.0 { + CBOR_TAGS_CID => visitor.visit_newtype_struct(&mut CidDeserializer(self)), + _ => Err(DecodeError::TypeMismatch { + name: "CBOR tag", + byte: tag.0 as u8, + }), + } + } + + /// This method should be called after a value has been deserialized to ensure there is no + /// trailing data in the input source. + pub fn end(&mut self) -> Result<(), DecodeError> { + match peek_one(&mut self.reader) { + Ok(_) => Err(DecodeError::TrailingData), + Err(DecodeError::Eof) => Ok(()), + Err(error) => Err(error), + } + } +} + +macro_rules! deserialize_type { + ( @ $t:ty , $name:ident , $visit:ident ) => { + #[inline] + fn $name(self, visitor: V) -> Result + where V: Visitor<'de> + { + let value = <$t>::decode(&mut self.reader)?; + visitor.$visit(value) + } + }; + ( $( $t:ty , $name:ident , $visit:ident );* $( ; )? 
) => { + $( + deserialize_type!(@ $t, $name, $visit); + )* + }; +} + +impl<'de, 'a, R: dec::Read<'de>> serde::Deserializer<'de> for &'a mut Deserializer { + type Error = DecodeError; + + fn deserialize_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let mut de = self.try_step()?; + let de = &mut *de; + + let byte = peek_one(&mut de.reader)?; + if is_indefinite(byte) { + return Err(DecodeError::IndefiniteSize); + } + match dec::if_major(byte) { + major::UNSIGNED => de.deserialize_u64(visitor), + major::NEGATIVE => { + // CBOR supports negative integers up to -2^64 which is less than i64::MIN. Only + // treat it as i128, if it is outside the i64 range. + let value = i128::decode(&mut de.reader)?; + match i64::try_from(value) { + Ok(value_i64) => visitor.visit_i64(value_i64), + Err(_) => visitor.visit_i128(value), + } + } + major::BYTES => de.deserialize_byte_buf(visitor), + major::STRING => de.deserialize_string(visitor), + major::ARRAY => de.deserialize_seq(visitor), + major::MAP => de.deserialize_map(visitor), + // The only supported tag is tag 42 (CID). + major::TAG => de.deserialize_cid(visitor), + major::SIMPLE => match byte { + marker::FALSE => { + de.reader.advance(1); + visitor.visit_bool(false) + } + marker::TRUE => { + de.reader.advance(1); + visitor.visit_bool(true) + } + marker::NULL => { + de.reader.advance(1); + visitor.visit_none() + } + marker::F32 => de.deserialize_f32(visitor), + marker::F64 => de.deserialize_f64(visitor), + _ => Err(DecodeError::Unsupported { byte }), + }, + _ => Err(DecodeError::Unsupported { byte }), + } + } + + deserialize_type!( + bool, deserialize_bool, visit_bool; + + i8, deserialize_i8, visit_i8; + i16, deserialize_i16, visit_i16; + i32, deserialize_i32, visit_i32; + i64, deserialize_i64, visit_i64; + i128, deserialize_i128, visit_i128; + + u8, deserialize_u8, visit_u8; + u16, deserialize_u16, visit_u16; + u32, deserialize_u32, visit_u32; + u64, deserialize_u64, visit_u64; + u128, deserialize_u128, visit_u128; + + f32, deserialize_f32, visit_f32; + f64, deserialize_f64, visit_f64; + ); + + #[inline] + fn deserialize_char(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + // Treat it as a String. + // This is a bit wasteful when encountering strings of more than one character, + // but we are optimistic this is a cold path. + self.deserialize_str(visitor) + } + + #[inline] + fn deserialize_bytes(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match >>::decode(&mut self.reader)?.0 { + Cow::Borrowed(buf) => visitor.visit_borrowed_bytes(buf), + Cow::Owned(buf) => visitor.visit_byte_buf(buf), + } + } + + #[inline] + fn deserialize_byte_buf(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_bytes(visitor) + } + + #[inline] + fn deserialize_str(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + match >::decode(&mut self.reader)? 
{ + Cow::Borrowed(buf) => visitor.visit_borrowed_str(buf), + Cow::Owned(buf) => visitor.visit_string(buf), + } + } + + #[inline] + fn deserialize_string(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_str(visitor) + } + + #[inline] + fn deserialize_option(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let byte = peek_one(&mut self.reader)?; + if byte != marker::NULL { + let mut de = self.try_step()?; + visitor.visit_some(&mut **de) + } else { + self.reader.advance(1); + visitor.visit_none() + } + } + + #[inline] + fn deserialize_unit(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let byte = pull_one(&mut self.reader)?; + if byte == marker::NULL { + visitor.visit_unit() + } else { + Err(DecodeError::TypeMismatch { name: "unit", byte }) + } + } + + #[inline] + fn deserialize_unit_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.deserialize_unit(visitor) + } + + #[inline] + fn deserialize_newtype_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + if name == CID_SERDE_PRIVATE_IDENTIFIER { + self.deserialize_cid(visitor) + } else { + visitor.visit_newtype_struct(self) + } + } + + #[inline] + fn deserialize_seq(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let mut de = self.try_step()?; + let seq = Accessor::array(&mut de)?; + visitor.visit_seq(seq) + } + + #[inline] + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + let mut de = self.try_step()?; + let seq = Accessor::tuple(&mut de, len)?; + visitor.visit_seq(seq) + } + + #[inline] + fn deserialize_tuple_struct( + self, + _name: &'static str, + len: usize, + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.deserialize_tuple(len, visitor) + } + + #[inline] + fn deserialize_map(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let mut de = self.try_step()?; + let map = Accessor::map(&mut de)?; + visitor.visit_map(map) + } + + #[inline] + fn deserialize_struct( + self, + _name: &'static str, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.deserialize_map(visitor) + } + + #[inline] + fn deserialize_enum( + self, + _name: &'static str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + let mut de = self.try_step()?; + let accessor = EnumAccessor::enum_(&mut de)?; + visitor.visit_enum(accessor) + } + + #[inline] + fn deserialize_identifier(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + self.deserialize_str(visitor) + } + + #[inline] + fn deserialize_ignored_any(self, visitor: V) -> Result + where + V: Visitor<'de>, + { + let _ignore = dec::IgnoredAny::decode(&mut self.reader)?; + visitor.visit_unit() + } + + #[inline] + fn is_human_readable(&self) -> bool { + false + } +} + +struct Accessor<'a, R> { + de: &'a mut Deserializer, + len: usize, +} + +impl<'de, 'a, R: dec::Read<'de>> Accessor<'a, R> { + #[inline] + pub fn array(de: &'a mut Deserializer) -> Result, DecodeError> { + let array_start = dec::ArrayStart::decode(&mut de.reader)?; + array_start.0.map_or_else( + || Err(DecodeError::IndefiniteSize), + move |len| Ok(Accessor { de, len }), + ) + } + + #[inline] + pub fn tuple( + de: &'a mut Deserializer, + len: usize, + ) -> Result, DecodeError> { + let array_start = dec::ArrayStart::decode(&mut de.reader)?; + + if array_start.0 == Some(len) { + Ok(Accessor { de, len }) + } else { + 
Err(DecodeError::RequireLength { + name: "tuple", + expect: len, + value: array_start.0.unwrap_or(0), + }) + } + } + + #[inline] + pub fn map(de: &'a mut Deserializer) -> Result, DecodeError> { + let map_start = dec::MapStart::decode(&mut de.reader)?; + map_start.0.map_or_else( + || Err(DecodeError::IndefiniteSize), + move |len| Ok(Accessor { de, len }), + ) + } +} + +impl<'de, 'a, R> de::SeqAccess<'de> for Accessor<'a, R> +where + R: dec::Read<'de>, +{ + type Error = DecodeError; + + #[inline] + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: de::DeserializeSeed<'de>, + { + if self.len > 0 { + self.len -= 1; + Ok(Some(seed.deserialize(&mut *self.de)?)) + } else { + Ok(None) + } + } + + #[inline] + fn size_hint(&self) -> Option { + Some(self.len) + } +} + +impl<'de, 'a, R: dec::Read<'de>> de::MapAccess<'de> for Accessor<'a, R> { + type Error = DecodeError; + + #[inline] + fn next_key_seed(&mut self, seed: K) -> Result, Self::Error> + where + K: de::DeserializeSeed<'de>, + { + if self.len > 0 { + self.len -= 1; + Ok(Some(seed.deserialize(&mut *self.de)?)) + } else { + Ok(None) + } + } + + #[inline] + fn next_value_seed(&mut self, seed: V) -> Result + where + V: de::DeserializeSeed<'de>, + { + seed.deserialize(&mut *self.de) + } + + #[inline] + fn size_hint(&self) -> Option { + Some(self.len) + } +} + +struct EnumAccessor<'a, R> { + de: &'a mut Deserializer, +} + +impl<'de, 'a, R: dec::Read<'de>> EnumAccessor<'a, R> { + #[inline] + pub fn enum_( + de: &'a mut Deserializer, + ) -> Result, DecodeError> { + let byte = peek_one(&mut de.reader)?; + match dec::if_major(byte) { + // string + major::STRING => Ok(EnumAccessor { de }), + // 1 length map + major::MAP if byte == (major::MAP << 5) | 1 => { + de.reader.advance(1); + Ok(EnumAccessor { de }) + } + _ => Err(DecodeError::TypeMismatch { name: "enum", byte }), + } + } +} + +impl<'de, 'a, R> de::EnumAccess<'de> for EnumAccessor<'a, R> +where + R: dec::Read<'de>, +{ + type Error = DecodeError; + type Variant = EnumAccessor<'a, R>; + + #[inline] + fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error> + where + V: de::DeserializeSeed<'de>, + { + let variant = seed.deserialize(&mut *self.de)?; + Ok((variant, self)) + } +} + +impl<'de, 'a, R> de::VariantAccess<'de> for EnumAccessor<'a, R> +where + R: dec::Read<'de>, +{ + type Error = DecodeError; + + #[inline] + fn unit_variant(self) -> Result<(), Self::Error> { + Ok(()) + } + + #[inline] + fn newtype_variant_seed(self, seed: T) -> Result + where + T: de::DeserializeSeed<'de>, + { + seed.deserialize(&mut *self.de) + } + + #[inline] + fn tuple_variant(self, len: usize, visitor: V) -> Result + where + V: Visitor<'de>, + { + use serde::Deserializer; + + self.de.deserialize_tuple(len, visitor) + } + + #[inline] + fn struct_variant( + self, + _fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + use serde::Deserializer; + + self.de.deserialize_map(visitor) + } +} + +/// Deserialize a DAG-CBOR encoded CID. +/// +/// This is without the CBOR tag information. It is only the CBOR byte string identifier (major +/// type 2), the number of bytes, and a null byte prefixed CID. +/// +/// The reason for not including the CBOR tag information is the [`Value`] implementation. That one +/// starts to parse the bytes, before we could interfere. If the data only includes a CID, we are +/// parsing over the tag to determine whether it is a CID or not and go from there. 
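The doc comment above explains that `CidDeserializer` expects CBOR tag 42 followed by a byte string whose first byte is a zero multibase prefix. A hedged round-trip sketch, not part of the patch, reusing the fixture CID from `examples/enums.rs` earlier in this patch (the asserted byte layout matches that fixture; the `main` wrapper is illustrative only):

```rust
use std::convert::TryFrom;

use ipld_core::cid::Cid;
use serde_ipld_dagcbor::{from_slice, to_vec};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Same CID as CBOR_CID_FIXTURE in examples/enums.rs.
    let cid = Cid::try_from("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy")?;

    // Tag 42 (0xd8 0x2a), then a 37-byte byte string (0x58 0x25) whose first
    // byte is the zero multibase prefix that `CidDeserializer` strips again.
    let bytes = to_vec(&cid)?;
    assert_eq!(bytes[..5], [0xd8, 0x2a, 0x58, 0x25, 0x00]);
    let decoded: Cid = from_slice(&bytes)?;
    assert_eq!(decoded, cid);

    // Indefinite-length items are not valid DAG-CBOR; the deserializer above
    // rejects them (`DecodeError::IndefiniteSize`).
    assert!(from_slice::<Vec<u8>>(&[0x9f, 0x01, 0x02, 0xff]).is_err());
    Ok(())
}
```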
+struct CidDeserializer<'a, R>(&'a mut Deserializer); + +impl<'de, 'a, R: dec::Read<'de>> de::Deserializer<'de> for &'a mut CidDeserializer<'a, R> { + type Error = DecodeError; + + #[cfg(not(feature = "no-cid-as-bytes"))] + fn deserialize_any>(self, visitor: V) -> Result { + self.deserialize_bytes(visitor) + } + #[cfg(feature = "no-cid-as-bytes")] + fn deserialize_any>(self, _visitor: V) -> Result { + Err(de::Error::custom( + "Only bytes can be deserialized into a CID", + )) + } + + #[inline] + fn deserialize_bytes>(self, visitor: V) -> Result { + let byte = peek_one(&mut self.0.reader)?; + match dec::if_major(byte) { + major::BYTES => { + // CBOR encoded CIDs have a zero byte prefix we have to remove. + match >>::decode(&mut self.0.reader)?.0 { + Cow::Borrowed(buf) => { + if buf.len() <= 1 || buf[0] != 0 { + Err(DecodeError::Msg("Invalid CID".into())) + } else { + visitor.visit_borrowed_bytes(&buf[1..]) + } + } + Cow::Owned(mut buf) => { + if buf.len() <= 1 || buf[0] != 0 { + Err(DecodeError::Msg("Invalid CID".into())) + } else { + buf.remove(0); + visitor.visit_byte_buf(buf) + } + } + } + } + _ => Err(DecodeError::Unsupported { byte }), + } + } + + fn deserialize_newtype_struct>( + self, + name: &str, + visitor: V, + ) -> Result { + if name == CID_SERDE_PRIVATE_IDENTIFIER { + self.deserialize_bytes(visitor) + } else { + Err(de::Error::custom([ + "This deserializer must not be called on newtype structs other than one named `", + CID_SERDE_PRIVATE_IDENTIFIER, + "`" + ].concat())) + } + } + + serde::forward_to_deserialize_any! { + bool byte_buf char enum f32 f64 i8 i16 i32 i64 identifier ignored_any map option seq str + string struct tuple tuple_struct u8 u16 u32 u64 unit unit_struct + } +} + +/// Check if byte is a major type with indefinite length. +#[inline] +pub fn is_indefinite(byte: u8) -> bool { + byte & marker::START == marker::START +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/error.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/error.rs new file mode 100644 index 000000000000..adfdf90a71ae --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/error.rs @@ -0,0 +1,255 @@ +//! When serializing or deserializing DAG-CBOR goes wrong. + +use alloc::{ + collections::TryReserveError, + string::{String, ToString}, +}; +use core::{convert::Infallible, fmt, num::TryFromIntError}; + +use serde::{de, ser}; + +/// An encoding error. +#[derive(Debug)] +pub enum EncodeError { + /// Custom error message. + Msg(String), + /// IO Error. 
+ Write(E), +} + +impl From for EncodeError { + fn from(err: E) -> EncodeError { + EncodeError::Write(err) + } +} + +#[cfg(feature = "std")] +impl ser::Error for EncodeError { + fn custom(msg: T) -> Self { + EncodeError::Msg(msg.to_string()) + } +} + +#[cfg(not(feature = "std"))] +impl ser::Error for EncodeError { + fn custom(msg: T) -> Self { + EncodeError::Msg(msg.to_string()) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for EncodeError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + EncodeError::Msg(_) => None, + EncodeError::Write(err) => Some(err), + } + } +} + +#[cfg(not(feature = "std"))] +impl ser::StdError for EncodeError {} + +impl fmt::Display for EncodeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self, f) + } +} + +impl From> for EncodeError { + fn from(err: cbor4ii::EncodeError) -> EncodeError { + match err { + cbor4ii::EncodeError::Write(e) => EncodeError::Write(e), + // Needed as `cbor4ii::EncodeError` is markes as non_exhaustive + _ => EncodeError::Msg(err.to_string()), + } + } +} + +/// A decoding error. +#[derive(Debug)] +pub enum DecodeError { + /// Custom error message. + Msg(String), + /// IO error. + Read(E), + /// End of file. + Eof, + /// Unexpected byte. + Mismatch { + /// Expected CBOR major type. + expect_major: u8, + /// Unexpected byte. + byte: u8, + }, + /// Unexpected type. + TypeMismatch { + /// Type name. + name: &'static str, + /// Type byte. + byte: u8, + }, + /// Too large integer. + CastOverflow(TryFromIntError), + /// Overflowing 128-bit integers. + Overflow { + /// Type of integer. + name: &'static str, + }, + /// Decoding bytes/strings might require a borrow. + RequireBorrowed { + /// Type name (e.g. "bytes", "str"). + name: &'static str, + }, + /// Length wasn't large enough. + RequireLength { + /// Type name. + name: &'static str, + /// Required length. + expect: usize, + /// Given length. + value: usize, + }, + /// Invalid UTF-8. + InvalidUtf8(core::str::Utf8Error), + /// Unsupported byte. + Unsupported { + /// Unsupported bute. + byte: u8, + }, + /// Recursion limit reached. + DepthLimit, + /// Trailing data. + TrailingData, + /// Indefinite sized item was encountered. 
+ IndefiniteSize, +} + +impl From for DecodeError { + fn from(err: E) -> DecodeError { + DecodeError::Read(err) + } +} + +#[cfg(feature = "std")] +impl de::Error for DecodeError { + fn custom(msg: T) -> Self { + DecodeError::Msg(msg.to_string()) + } +} + +#[cfg(not(feature = "std"))] +impl de::Error for DecodeError { + fn custom(msg: T) -> Self { + DecodeError::Msg(msg.to_string()) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for DecodeError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + DecodeError::Msg(_) => None, + DecodeError::Read(err) => Some(err), + _ => None, + } + } +} + +#[cfg(not(feature = "std"))] +impl ser::StdError for DecodeError {} + +impl fmt::Display for DecodeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self, f) + } +} + +impl From> for DecodeError { + fn from(err: cbor4ii::DecodeError) -> DecodeError { + match err { + cbor4ii::DecodeError::Read(read) => DecodeError::Read(read), + cbor4ii::DecodeError::Eof => DecodeError::Eof, + cbor4ii::DecodeError::Mismatch { expect_major, byte } => { + DecodeError::Mismatch { expect_major, byte } + } + cbor4ii::DecodeError::TypeMismatch { name, byte } => { + DecodeError::TypeMismatch { name, byte } + } + cbor4ii::DecodeError::CastOverflow(overflow) => DecodeError::CastOverflow(overflow), + cbor4ii::DecodeError::Overflow { name } => DecodeError::Overflow { name }, + cbor4ii::DecodeError::RequireBorrowed { name } => DecodeError::RequireBorrowed { name }, + cbor4ii::DecodeError::RequireLength { + name, + expect, + value, + } => DecodeError::RequireLength { + name, + expect, + value, + }, + cbor4ii::DecodeError::InvalidUtf8(invalid) => DecodeError::InvalidUtf8(invalid), + cbor4ii::DecodeError::Unsupported { byte } => DecodeError::Unsupported { byte }, + cbor4ii::DecodeError::DepthLimit => DecodeError::DepthLimit, + // Needed as `cbor4ii::EncodeError` is markes as non_exhaustive + _ => DecodeError::Msg(err.to_string()), + } + } +} + +/// Encode and Decode error combined. +#[derive(Debug)] +pub enum CodecError { + /// A decoding error. + Decode(DecodeError), + /// An encoding error. + Encode(EncodeError), + /// A decoding error. + #[cfg(feature = "std")] + DecodeIo(DecodeError), + /// An encoding error. 
+ #[cfg(feature = "std")] + EncodeIo(EncodeError), +} + +impl fmt::Display for CodecError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Decode(error) => write!(f, "decode error: {}", error), + Self::Encode(error) => write!(f, "encode error: {}", error), + #[cfg(feature = "std")] + Self::DecodeIo(error) => write!(f, "decode io error: {}", error), + #[cfg(feature = "std")] + Self::EncodeIo(error) => write!(f, "encode io error: {}", error), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for CodecError {} + +impl From> for CodecError { + fn from(error: DecodeError) -> Self { + Self::Decode(error) + } +} + +#[cfg(feature = "std")] +impl From> for CodecError { + fn from(error: DecodeError) -> Self { + Self::DecodeIo(error) + } +} + +impl From> for CodecError { + fn from(error: EncodeError) -> Self { + Self::Encode(error) + } +} + +#[cfg(feature = "std")] +impl From> for CodecError { + fn from(error: EncodeError) -> Self { + Self::EncodeIo(error) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/lib.rs new file mode 100644 index 000000000000..32eb2518a2b3 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/lib.rs @@ -0,0 +1,145 @@ +//! DAG-CBOR serialization and deserialization. +//! +//! # Usage +//! +//! Add this to your `Cargo.toml`: +//! ```toml +//! [dependencies] +//! serde_ipld_dagcbor = "0.1.0" +//! ``` +//! +//! Storing and loading Rust types is easy and requires only +//! minimal modifications to the program code. +//! +//! ```rust +//! # #[cfg(not(feature = "std"))] +//! # fn main() {} +//! use serde_derive::{Deserialize, Serialize}; +//! use std::error::Error; +//! use std::fs::File; +//! use std::io::BufReader; +//! +//! // Types annotated with `Serialize` can be stored as CBOR. +//! // To be able to load them again add `Deserialize`. +//! #[derive(Debug, Serialize, Deserialize)] +//! struct Mascot { +//! name: String, +//! species: String, +//! year_of_birth: u32, +//! } +//! +//! # #[cfg(feature = "std")] +//! fn main() -> Result<(), Box> { +//! let ferris = Mascot { +//! name: "Ferris".to_owned(), +//! species: "crab".to_owned(), +//! year_of_birth: 2015, +//! }; +//! +//! let ferris_file = File::create("examples/ferris.cbor")?; +//! // Write Ferris to the given file. +//! // Instead of a file you can use any type that implements `io::Write` +//! // like a HTTP body, database connection etc. +//! serde_ipld_dagcbor::to_writer(ferris_file, &ferris)?; +//! +//! let tux_file = File::open("examples/tux.cbor")?; +//! let tux_reader = BufReader::new(tux_file); +//! // Load Tux from a file. +//! // Serde CBOR performs roundtrip serialization meaning that +//! // the data will not change in any way. +//! let tux: Mascot = serde_ipld_dagcbor::from_reader(tux_reader)?; +//! +//! println!("{:?}", tux); +//! // prints: Mascot { name: "Tux", species: "penguin", year_of_birth: 1996 } +//! +//! Ok(()) +//! } +//! ``` +//! +//! There are a lot of options available to customize the format. +//! To operate on untyped DAG-CBOR values have a look at the [`ipld_core::ipld::Ipld`] type. +//! +//! # Type-based Serialization and Deserialization +//! Serde provides a mechanism for low boilerplate serialization & deserialization of values to and +//! from CBOR via the serialization API. To be able to serialize a piece of data, it must implement +//! the `serde::Serialize` trait. 
To be able to deserialize a piece of data, it must implement the +//! `serde::Deserialize` trait. Serde provides an annotation to automatically generate the +//! code for these traits: `#[derive(Serialize, Deserialize)]`. +//! +//! Read a general CBOR value with an unknown content. +//! +//! ```rust +//! use serde_ipld_dagcbor::from_slice; +//! use ipld_core::ipld::Ipld; +//! +//! let slice = b"\x82\x01\xa1aaab"; +//! let value: Ipld = from_slice(slice).unwrap(); +//! println!("{:?}", value); // List([Integer(1), Map({"a": String("b")})]) +//! ``` +//! +//! Serialize an object. +//! +//! ```rust +//! use std::collections::BTreeMap; +//! use serde_ipld_dagcbor::to_vec; +//! +//! let mut programming_languages = BTreeMap::new(); +//! programming_languages.insert("rust", vec!["safe", "concurrent", "fast"]); +//! programming_languages.insert("python", vec!["powerful", "friendly", "open"]); +//! programming_languages.insert("js", vec!["lightweight", "interpreted", "object-oriented"]); +//! let encoded = to_vec(&programming_languages); +//! assert_eq!(encoded.unwrap().len(), 103); +//! ``` +//! +//! # `no-std` support +//! +//! Serde CBOR supports building in a `no_std` context, use the following lines +//! in your `Cargo.toml` dependencies: +//! ``` toml +//! [dependencies] +//! serde = { version = "1.0", default-features = false } +//! serde_ipld_dagcbor = { version = "0.1.0", default-features = false } +//! ``` +//! +//! Without the `std` feature the functions [from_reader], and [to_writer] are not exported. +//! +//! *Note*: to use derive macros in serde you will need to declare `serde` +//! dependency like so: +//! ``` toml +//! serde = { version = "1.0", default-features = false, features = ["derive"] } +//! ``` + +#![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +mod cbor4ii_nonpub; +// The `Codec` implementation is only available if the `no-cid-as-bytes` feature is disabled, due +// to the links being extracted with a Serde based approach. +#[cfg(all(feature = "std", not(feature = "no-cid-as-bytes"), feature = "codec"))] +pub mod codec; +pub mod de; +pub mod error; +pub mod ser; + +#[doc(inline)] +pub use crate::error::{DecodeError, EncodeError}; + +// Convenience functions for serialization and deserialization. +#[doc(inline)] +pub use crate::de::from_slice; + +#[cfg(feature = "std")] +#[doc(inline)] +pub use crate::de::from_reader; + +#[doc(inline)] +pub use crate::ser::to_vec; + +#[cfg(feature = "std")] +#[doc(inline)] +pub use crate::ser::to_writer; + +/// The CBOR tag that is used for CIDs. +const CBOR_TAGS_CID: u64 = 42; diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/ser.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/ser.rs new file mode 100644 index 000000000000..27c855d129f4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/src/ser.rs @@ -0,0 +1,699 @@ +//! Serialization. +#[cfg(not(feature = "std"))] +use alloc::{collections::TryReserveError, string::ToString, vec::Vec}; +#[cfg(feature = "std")] +use std::collections::TryReserveError; + +pub use cbor4ii::core::utils::BufWriter; +#[cfg(feature = "std")] +use cbor4ii::core::utils::IoWriter; +use cbor4ii::core::{ + enc::{self, Encode}, + types, +}; +use ipld_core::cid::serde::CID_SERDE_PRIVATE_IDENTIFIER; +use serde::{ser, Serialize}; + +use crate::error::EncodeError; +use crate::CBOR_TAGS_CID; + +/// Serializes a value to a vector. 
+pub fn to_vec(value: &T) -> Result, EncodeError> +where + T: Serialize + ?Sized, +{ + let writer = BufWriter::new(Vec::new()); + let mut serializer = Serializer::new(writer); + value.serialize(&mut serializer)?; + Ok(serializer.into_inner().into_inner()) +} + +/// Serializes a value to a writer. +#[cfg(feature = "std")] +pub fn to_writer(writer: W, value: &T) -> Result<(), EncodeError> +where + W: std::io::Write, + T: Serialize, +{ + let mut serializer = Serializer::new(IoWriter::new(writer)); + value.serialize(&mut serializer) +} + +/// A structure for serializing Rust values to DAG-CBOR. +pub struct Serializer { + writer: W, +} + +impl Serializer { + /// Creates a new CBOR serializer. + pub fn new(writer: W) -> Serializer { + Serializer { writer } + } + + /// Returns the underlying writer. + pub fn into_inner(self) -> W { + self.writer + } +} + +impl<'a, W: enc::Write> serde::Serializer for &'a mut Serializer { + type Ok = (); + type Error = EncodeError; + + type SerializeSeq = CollectSeq<'a, W>; + type SerializeTuple = BoundedCollect<'a, W>; + type SerializeTupleStruct = BoundedCollect<'a, W>; + type SerializeTupleVariant = BoundedCollect<'a, W>; + type SerializeMap = CollectMap<'a, W>; + type SerializeStruct = CollectMap<'a, W>; + type SerializeStructVariant = CollectMap<'a, W>; + + #[inline] + fn serialize_bool(self, v: bool) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_i8(self, v: i8) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_i16(self, v: i16) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_i32(self, v: i32) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_i64(self, v: i64) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_u8(self, v: u8) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_u16(self, v: u16) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_u32(self, v: u32) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_u64(self, v: u64) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_f32(self, v: f32) -> Result { + // In DAG-CBOR floats are always encoded as f64. + self.serialize_f64(f64::from(v)) + } + + #[inline] + fn serialize_f64(self, v: f64) -> Result { + // In DAG-CBOR only finite floats are supported. + if !v.is_finite() { + Err(EncodeError::Msg( + "Float must be a finite number, not Infinity or NaN".into(), + )) + } else { + v.encode(&mut self.writer)?; + Ok(()) + } + } + + #[inline] + fn serialize_char(self, v: char) -> Result { + let mut buf = [0; 4]; + self.serialize_str(v.encode_utf8(&mut buf)) + } + + #[inline] + fn serialize_str(self, v: &str) -> Result { + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_bytes(self, v: &[u8]) -> Result { + types::Bytes(v).encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_none(self) -> Result { + types::Null.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_some(self, value: &T) -> Result { + value.serialize(self) + } + + #[inline] + fn serialize_unit(self) -> Result { + // The cbor4ii Serde implementation encodes unit as an empty array, for DAG-CBOR we encode + // it as `NULL`. 
+ types::Null.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_unit_struct(self, _name: &'static str) -> Result { + self.serialize_unit() + } + + #[inline] + fn serialize_unit_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + ) -> Result { + self.serialize_str(variant) + } + + #[inline] + fn serialize_newtype_struct( + self, + name: &'static str, + value: &T, + ) -> Result { + if name == CID_SERDE_PRIVATE_IDENTIFIER { + value.serialize(&mut CidSerializer(self)) + } else { + value.serialize(self) + } + } + + #[inline] + fn serialize_newtype_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + value: &T, + ) -> Result { + enc::MapStartBounded(1).encode(&mut self.writer)?; + variant.encode(&mut self.writer)?; + value.serialize(self) + } + + #[inline] + fn serialize_seq(self, len: Option) -> Result { + CollectSeq::new(self, len) + } + + #[inline] + fn serialize_tuple(self, len: usize) -> Result { + enc::ArrayStartBounded(len).encode(&mut self.writer)?; + Ok(BoundedCollect { ser: self }) + } + + #[inline] + fn serialize_tuple_struct( + self, + _name: &'static str, + len: usize, + ) -> Result { + self.serialize_tuple(len) + } + + #[inline] + fn serialize_tuple_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + enc::MapStartBounded(1).encode(&mut self.writer)?; + variant.encode(&mut self.writer)?; + enc::ArrayStartBounded(len).encode(&mut self.writer)?; + Ok(BoundedCollect { ser: self }) + } + + #[inline] + fn serialize_map(self, _len: Option) -> Result { + Ok(CollectMap::new(self)) + } + + #[inline] + fn serialize_struct( + self, + _name: &'static str, + len: usize, + ) -> Result { + enc::MapStartBounded(len).encode(&mut self.writer)?; + Ok(CollectMap::new(self)) + } + + #[inline] + fn serialize_struct_variant( + self, + _name: &'static str, + _variant_index: u32, + variant: &'static str, + len: usize, + ) -> Result { + enc::MapStartBounded(1).encode(&mut self.writer)?; + variant.encode(&mut self.writer)?; + enc::MapStartBounded(len).encode(&mut self.writer)?; + Ok(CollectMap::new(self)) + } + + #[inline] + fn serialize_i128(self, v: i128) -> Result { + if !(u64::MAX as i128 >= v && -(u64::MAX as i128 + 1) <= v) { + return Err(EncodeError::Msg( + "Integer must be within [-u64::MAX-1, u64::MAX] range".into(), + )); + } + + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn serialize_u128(self, v: u128) -> Result { + if (u64::MAX as u128) < v { + return Err(EncodeError::Msg( + "Unsigned integer must be within [0, u64::MAX] range".into(), + )); + } + v.encode(&mut self.writer)?; + Ok(()) + } + + #[inline] + fn is_human_readable(&self) -> bool { + false + } +} + +/// Struct for implementign SerializeSeq. +pub struct CollectSeq<'a, W> { + /// The number of elements. This is used in case the number of elements is not known + /// beforehand. + count: usize, + ser: &'a mut Serializer, + /// An in-memory serializer in case the number of elements is not known beforehand. + mem_ser: Option>, +} + +impl<'a, W: enc::Write> CollectSeq<'a, W> { + /// If the length of the sequence is given, use it. Else buffer the sequence in order to count + /// the number of elements, which is then written before the elements are. 
+ fn new(ser: &'a mut Serializer, len: Option) -> Result> { + let mem_ser = if let Some(len) = len { + enc::ArrayStartBounded(len).encode(&mut ser.writer)?; + None + } else { + Some(Serializer::new(BufWriter::new(Vec::new()))) + }; + Ok(Self { + count: 0, + ser, + mem_ser, + }) + } +} + +/// Helper for processing collections. +pub struct BoundedCollect<'a, W> { + ser: &'a mut Serializer, +} + +impl serde::ser::SerializeSeq for CollectSeq<'_, W> { + type Ok = (); + type Error = EncodeError; + + #[inline] + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> { + self.count += 1; + if let Some(ser) = self.mem_ser.as_mut() { + value + .serialize(&mut *ser) + .map_err(|_| EncodeError::Msg("List element cannot be serialized".to_string())) + } else { + value.serialize(&mut *self.ser) + } + } + + #[inline] + fn end(self) -> Result { + // Data was buffered in order to be able to write out the number of elements before they + // are serialized. + if let Some(ser) = self.mem_ser { + enc::ArrayStartBounded(self.count).encode(&mut self.ser.writer)?; + self.ser.writer.push(&ser.into_inner().into_inner())?; + } + + Ok(()) + } +} + +impl serde::ser::SerializeTuple for BoundedCollect<'_, W> { + type Ok = (); + type Error = EncodeError; + + #[inline] + fn serialize_element(&mut self, value: &T) -> Result<(), Self::Error> { + value.serialize(&mut *self.ser) + } + + #[inline] + fn end(self) -> Result { + Ok(()) + } +} + +impl serde::ser::SerializeTupleStruct for BoundedCollect<'_, W> { + type Ok = (); + type Error = EncodeError; + + #[inline] + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> { + value.serialize(&mut *self.ser) + } + + #[inline] + fn end(self) -> Result { + Ok(()) + } +} + +impl serde::ser::SerializeTupleVariant for BoundedCollect<'_, W> { + type Ok = (); + type Error = EncodeError; + + #[inline] + fn serialize_field(&mut self, value: &T) -> Result<(), Self::Error> { + value.serialize(&mut *self.ser) + } + + #[inline] + fn end(self) -> Result { + Ok(()) + } +} + +/// CBOR RFC-7049 specifies a canonical sort order, where keys are sorted by length first. This +/// was later revised with RFC-8949, but we need to stick to the original order to stay compatible +/// with existing data. +/// We first serialize each map entry (the key and the value) into a buffer and then sort those +/// buffers. Once sorted they are written to the actual output. +pub struct CollectMap<'a, W> { + buffer: BufWriter, + entries: Vec>, + ser: &'a mut Serializer, +} + +impl<'a, W> CollectMap<'a, W> +where + W: enc::Write, +{ + fn new(ser: &'a mut Serializer) -> Self { + Self { + buffer: BufWriter::new(Vec::new()), + entries: Vec::new(), + ser, + } + } + + fn serialize( + &mut self, + maybe_key: Option<&'static str>, + value: &T, + ) -> Result<(), EncodeError> { + // Instantiate a new serializer, so that the buffer can be re-used. + let mut mem_serializer = Serializer::new(&mut self.buffer); + if let Some(key) = maybe_key { + key.serialize(&mut mem_serializer) + .map_err(|_| EncodeError::Msg("Struct key cannot be serialized.".to_string()))?; + } + value + .serialize(&mut mem_serializer) + .map_err(|_| EncodeError::Msg("Struct value cannot be serialized.".to_string()))?; + + self.entries.push(self.buffer.buffer().to_vec()); + self.buffer.clear(); + + Ok(()) + } + + fn end(mut self) -> Result<(), EncodeError> { + // This sorting step makes sure we have the expected order of the keys. 
Byte-wise + // comparison over the encoded forms gives us the right order as keys in DAG-CBOR are + // always (text) strings, hence have the same CBOR major type 3. The length of the string + // is encoded in the prefix bits along with the major type. This means that a shorter string + // always sorts before a longer string even with the compact length representation. + self.entries.sort_unstable(); + for entry in self.entries { + self.ser.writer.push(&entry)?; + } + Ok(()) + } +} + +impl serde::ser::SerializeMap for CollectMap<'_, W> +where + W: enc::Write, +{ + type Ok = (); + type Error = EncodeError; + + #[inline] + fn serialize_key(&mut self, key: &T) -> Result<(), Self::Error> { + // The key needs to be add to the buffer without any further operations. Serializing the + // value will then do the necessary flushing etc. + let mut mem_serializer = Serializer::new(&mut self.buffer); + key.serialize(&mut mem_serializer) + .map_err(|_| EncodeError::Msg("Map key cannot be serialized.".to_string()))?; + Ok(()) + } + + #[inline] + fn serialize_value(&mut self, value: &T) -> Result<(), Self::Error> { + self.serialize(None, value) + } + + #[inline] + fn end(self) -> Result { + enc::MapStartBounded(self.entries.len()).encode(&mut self.ser.writer)?; + self.end() + } +} + +impl serde::ser::SerializeStruct for CollectMap<'_, W> +where + W: enc::Write, +{ + type Ok = (); + type Error = EncodeError; + + #[inline] + fn serialize_field( + &mut self, + key: &'static str, + value: &T, + ) -> Result<(), Self::Error> { + self.serialize(Some(key), value) + } + + #[inline] + fn end(self) -> Result { + self.end() + } +} + +impl serde::ser::SerializeStructVariant for CollectMap<'_, W> +where + W: enc::Write, +{ + type Ok = (); + type Error = EncodeError; + + #[inline] + fn serialize_field( + &mut self, + key: &'static str, + value: &T, + ) -> Result<(), Self::Error> { + self.serialize(Some(key), value) + } + + #[inline] + fn end(self) -> Result { + self.end() + } +} + +/// Serializing a CID correctly as DAG-CBOR. 
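Editor's note, putting the doc comment above in concrete terms: a CID is written as CBOR tag 42 wrapping a byte string whose payload is the binary CID prefixed with a 0x00 multibase-identity byte. A small sketch, reusing the CID string that also appears in this crate's tests:

```rust
use std::str::FromStr;

use ipld_core::cid::Cid;
use serde_ipld_dagcbor::to_vec;

fn main() {
    // Same CID string that the vendored tests below use.
    let cid =
        Cid::from_str("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap();
    let encoded = to_vec(&cid).unwrap();

    // Tag 42 (0xd8 0x2a), a 37-byte byte string (0x58 0x25), then the
    // 0x00 multibase-identity prefix required by DAG-CBOR.
    assert_eq!(&encoded[..5], [0xd8, 0x2a, 0x58, 0x25, 0x00]);
    // The remainder is the plain binary CID; decoders strip the 0x00 again.
    assert_eq!(cid.to_bytes(), &encoded[5..]);
}
```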
+struct CidSerializer<'a, W>(&'a mut Serializer); + +impl<'a, W: enc::Write> ser::Serializer for &'a mut CidSerializer<'a, W> +where + W::Error: core::fmt::Debug, +{ + type Ok = (); + type Error = EncodeError; + + type SerializeSeq = ser::Impossible; + type SerializeTuple = ser::Impossible; + type SerializeTupleStruct = ser::Impossible; + type SerializeTupleVariant = ser::Impossible; + type SerializeMap = ser::Impossible; + type SerializeStruct = ser::Impossible; + type SerializeStructVariant = ser::Impossible; + + fn serialize_bool(self, _value: bool) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_i8(self, _value: i8) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_i16(self, _value: i16) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_i32(self, _value: i32) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_i64(self, _value: i64) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_u8(self, _value: u8) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_u16(self, _value: u16) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_u32(self, _value: u32) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_u64(self, _value: u64) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_f32(self, _value: f32) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_f64(self, _value: f64) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_char(self, _value: char) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_str(self, _value: &str) -> Result { + Err(ser::Error::custom("unreachable")) + } + + fn serialize_bytes(self, value: &[u8]) -> Result { + // The bytes of the CID is prefixed with a null byte when encoded as CBOR. + let prefixed = [&[0x00], value].concat(); + // CIDs are serialized with CBOR tag 42. 
+ types::Tag(CBOR_TAGS_CID, types::Bytes(&prefixed[..])).encode(&mut self.0.writer)?; + Ok(()) + } + + fn serialize_none(self) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_some( + self, + _value: &T, + ) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_unit(self) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_unit_struct(self, _name: &str) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_unit_variant( + self, + _name: &str, + _variant_index: u32, + _variant: &str, + ) -> Result { + Err(ser::Error::custom("unreachable")) + } + + fn serialize_newtype_struct( + self, + _name: &str, + _value: &T, + ) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_newtype_variant( + self, + _name: &str, + _variant_index: u32, + _variant: &str, + _value: &T, + ) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_seq(self, _len: Option) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_tuple(self, _len: usize) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_tuple_struct( + self, + _name: &str, + _len: usize, + ) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_tuple_variant( + self, + _name: &str, + _variant_index: u32, + _variant: &str, + _len: usize, + ) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_map(self, _len: Option) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_struct( + self, + _name: &str, + _len: usize, + ) -> Result { + Err(ser::Error::custom("unreachable")) + } + fn serialize_struct_variant( + self, + _name: &str, + _variant_index: u32, + _variant: &str, + _len: usize, + ) -> Result { + Err(ser::Error::custom("unreachable")) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/cid.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/cid.rs new file mode 100644 index 000000000000..a708e9ac755f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/cid.rs @@ -0,0 +1,355 @@ +use std::convert::{TryFrom, TryInto}; +use std::str::FromStr; + +use ipld_core::{cid::Cid, ipld::Ipld}; +use serde::de; +use serde_bytes::ByteBuf; +use serde_derive::{Deserialize, Serialize}; +use serde_ipld_dagcbor::{from_slice, to_vec}; + +#[test] +fn test_cid_struct() { + #[derive(Debug, PartialEq, Deserialize, Serialize)] + struct MyStruct { + cid: Cid, + data: bool, + } + + let cid = Cid::from_str("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap(); + let cid_encoded = to_vec(&cid).unwrap(); + assert_eq!( + cid_encoded, + [ + 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, + 0xff, 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, + 0x70, 0x64, 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, + ] + ); + + let cid_decoded_as_cid: Cid = from_slice(&cid_encoded).unwrap(); + assert_eq!(cid_decoded_as_cid, cid); + + let cid_decoded_as_ipld: Ipld = from_slice(&cid_encoded).unwrap(); + assert_eq!(cid_decoded_as_ipld, Ipld::Link(cid)); + + // Tests with the Type nested in a struct + + let mystruct = MyStruct { cid, data: true }; + let mystruct_encoded = to_vec(&mystruct).unwrap(); + assert_eq!( + mystruct_encoded, + [ + 0xa2, 0x63, 0x63, 0x69, 0x64, 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, + 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 
0x3c, 0x1d, 0x30, + 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, + 0x62, 0x66, 0xe7, 0xae, 0x64, 0x64, 0x61, 0x74, 0x61, 0xf5 + ] + ); + + let mystruct_decoded_as_mystruct: MyStruct = from_slice(&mystruct_encoded).unwrap(); + assert_eq!(mystruct_decoded_as_mystruct, mystruct); + + let mystruct_decoded_as_ipld: Ipld = from_slice(&mystruct_encoded).unwrap(); + let mut expected_map = std::collections::BTreeMap::new(); + expected_map.insert("cid".to_string(), Ipld::Link(cid)); + expected_map.insert("data".to_string(), Ipld::Bool(true)); + assert_eq!(mystruct_decoded_as_ipld, Ipld::Map(expected_map)); +} + +/// Test that arbitrary bytes are not interpreted as CID. +#[test] +fn test_binary_not_as_cid() { + // h'affe' + // 42 # bytes(2) + // AFFE # "\xAF\xFE" + let bytes = [0x42, 0xaf, 0xfe]; + let bytes_as_ipld: Ipld = from_slice(&bytes).unwrap(); + assert_eq!(bytes_as_ipld, Ipld::Bytes(vec![0xaf, 0xfe])); +} + +/// Test that CIDs don't decode into byte buffers, lists, etc. +#[test] +fn test_cid_not_as_bytes() { + let cbor_cid = [ + 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, + 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, + 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, + ]; + from_slice::>(&cbor_cid).expect_err("shouldn't have parsed a tagged CID as a sequence"); + from_slice::(&cbor_cid) + .expect_err("shouldn't have parsed a tagged CID as a byte array"); + from_slice::(&cbor_cid[2..]) + .expect("should have parsed an untagged CID as a byte array"); + + #[derive(Debug, Deserialize, PartialEq)] + struct NewType(ByteBuf); + + #[derive(Debug, Deserialize, PartialEq)] + #[serde(untagged)] + enum BytesInEnum { + MyCid(NewType), + } + + // With the `no-cid-as-bytes` feature enabled, we make sure that it will error, when we try to + // decode a CID as bytes. + #[cfg(feature = "no-cid-as-bytes")] + from_slice::(&cbor_cid) + .expect_err("shouldn't have parsed a tagged CID as byte array"); + + // With that feature disabled, then it will decode the CID (without the TAG and the zero + // prefix) as bytes. + #[cfg(not(feature = "no-cid-as-bytes"))] + { + let cid_without_tag = &cbor_cid[5..]; + assert_eq!( + from_slice::(&cbor_cid).unwrap(), + BytesInEnum::MyCid(NewType(ByteBuf::from(cid_without_tag))) + ); + } +} + +/// Test whether a binary CID could be serialized if it isn't prefixed by tag 42. It should fail. +#[test] +fn test_cid_bytes_without_tag() { + let cbor_cid = [ + 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, + 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, + 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, + ]; + let decoded_cbor_cid: Cid = from_slice(&cbor_cid).unwrap(); + assert_eq!(decoded_cbor_cid.to_bytes(), &cbor_cid[5..]); + + // The CID without the tag 42 prefix + let cbor_bytes = &cbor_cid[2..]; + from_slice::(cbor_bytes).expect_err("should have failed to decode bytes as cid"); +} + +/// This test shows how a kinded enum could be implemented. 
+#[test] +fn test_cid_in_kinded_enum() { + #[derive(Debug, PartialEq)] + pub enum Kinded { + Bytes(ByteBuf), + Link(Cid), + } + + let cbor_cid = [ + 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, + 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, + 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, + ]; + + impl TryFrom for Kinded { + type Error = (); + + fn try_from(ipld: Ipld) -> Result { + match ipld { + Ipld::Bytes(bytes) => Ok(Self::Bytes(ByteBuf::from(bytes))), + Ipld::Link(cid) => Ok(Self::Link(cid)), + _ => Err(()), + } + } + } + + impl<'de> de::Deserialize<'de> for Kinded { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + Ipld::deserialize(deserializer).and_then(|ipld| { + ipld.try_into() + .map_err(|_| de::Error::custom("No matching enum variant found")) + }) + } + } + + let decoded_cid: Kinded = from_slice(&cbor_cid).unwrap(); + let cid = Cid::try_from(&cbor_cid[5..]).unwrap(); + assert_eq!(decoded_cid, Kinded::Link(cid)); + + // The CID without the tag 42 prefix + let cbor_bytes = &cbor_cid[2..]; + let decoded_bytes: Kinded = from_slice(cbor_bytes).unwrap(); + // The CBOR decoded bytes don't contain the prefix with the bytes type identifier and the + // length. + let bytes = cbor_bytes[2..].to_vec(); + assert_eq!(decoded_bytes, Kinded::Bytes(ByteBuf::from(bytes))); + + // Check that random bytes cannot be deserialized. + let random_bytes = &cbor_cid[10..]; + let decoded_random_bytes: Result = from_slice(random_bytes); + assert!(decoded_random_bytes.is_err()); +} + +/// This test shows how a kinded enum could be implemented, when bytes as well as a CID are wrapped +/// in a newtype struct. +#[test] +fn test_cid_in_kinded_enum_with_newtype() { + #[derive(Debug, Deserialize, PartialEq)] + pub struct Foo(#[serde(with = "serde_bytes")] Vec); + + #[derive(Debug, PartialEq)] + pub enum Kinded { + MyBytes(Foo), + Link(Cid), + } + + let cbor_cid = [ + 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, + 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, + 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, + ]; + + impl TryFrom for Kinded { + type Error = (); + + fn try_from(ipld: Ipld) -> Result { + match ipld { + Ipld::Bytes(bytes) => Ok(Self::MyBytes(Foo(bytes))), + Ipld::Link(cid) => Ok(Self::Link(cid)), + _ => Err(()), + } + } + } + + impl<'de> de::Deserialize<'de> for Kinded { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + Ipld::deserialize(deserializer).and_then(|ipld| { + ipld.try_into() + .map_err(|_| de::Error::custom("No matching enum variant found")) + }) + } + } + + let decoded_cid: Kinded = from_slice(&cbor_cid).unwrap(); + // The actual CID is without the CBOR tag 42, the byte identifier and the data length. + let cid = Cid::try_from(&cbor_cid[5..]).unwrap(); + assert_eq!(decoded_cid, Kinded::Link(cid)); + + // The CID without the tag 42 prefix + let cbor_bytes = &cbor_cid[2..]; + let decoded_bytes: Kinded = from_slice(cbor_bytes).unwrap(); + // The CBOR decoded bytes don't contain the prefix with the bytes type identifier and the + // length. + let bytes = cbor_bytes[2..].to_vec(); + assert_eq!(decoded_bytes, Kinded::MyBytes(Foo(bytes))); + + // Check that random bytes cannot be deserialized. 
+ let random_bytes = &cbor_cid[10..]; + let decoded_random_bytes: Result = from_slice(random_bytes); + assert!(decoded_random_bytes.is_err()); +} + +#[test] +fn test_cid_in_tagged_enum() { + #[derive(Debug, Deserialize, PartialEq)] + pub enum Externally { + Cid(Cid), + } + + #[derive(Debug, Deserialize, PartialEq)] + #[serde(tag = "type")] + pub enum Internally { + Cid { cid: Cid }, + } + + #[derive(Debug, Deserialize, PartialEq)] + #[serde(untagged)] + pub enum Untagged { + Cid(Cid), + } + + let cbor_cid = [ + 0xd8, 0x2a, 0x58, 0x25, 0x00, 0x01, 0x55, 0x12, 0x20, 0x2c, 0x26, 0xb4, 0x6b, 0x68, 0xff, + 0xc6, 0x8f, 0xf9, 0x9b, 0x45, 0x3c, 0x1d, 0x30, 0x41, 0x34, 0x13, 0x42, 0x2d, 0x70, 0x64, + 0x83, 0xbf, 0xa0, 0xf9, 0x8a, 0x5e, 0x88, 0x62, 0x66, 0xe7, 0xae, + ]; + + // {"Cid": cid} + let cbor_map1 = [vec![0xa1, 0x63, 0x43, 0x69, 0x64], Vec::from(cbor_cid)].concat(); + + // {"cid": cid, "type": "Cid"} + let cbor_map2 = [ + vec![ + 0xa2, 0x64, 0x74, 0x79, 0x70, 0x65, 0x63, 0x43, 0x69, 0x64, 0x63, 0x63, 0x69, 0x64, + ], + Vec::from(cbor_cid), + ] + .concat(); + + let cid = Cid::try_from(&cbor_cid[5..]).unwrap(); + + let decoded: Externally = from_slice(&cbor_map1).unwrap(); + assert_eq!(decoded, Externally::Cid(cid)); + + // With the `no-cid-as-bytes` feature enabled, it's not possible to use internally tagged or + // untaggd enums. This behaviour is *not* intentionally, but incidentally due to how Serde + // internally works.. This test is only added to see what one could expect, and to get + // notified in case it ever gets supported. + #[cfg(feature = "no-cid-as-bytes")] + { + from_slice::(&cbor_map2) + .expect_err("shouldn't be able to decode the intanlly tagged enum"); + from_slice::(&cbor_cid) + .expect_err("shouldn't be able to decode the untagged enum"); + } + + // With that feature disabled, it's the expected desired behaviour. + #[cfg(not(feature = "no-cid-as-bytes"))] + { + let decoded: Internally = from_slice(&cbor_map2).unwrap(); + assert_eq!(decoded, Internally::Cid { cid }); + + let decoded: Untagged = from_slice(&cbor_cid).unwrap(); + assert_eq!(decoded, Untagged::Cid(cid)); + } +} + +#[test] +fn test_cid_empty_errors() { + // Tag 42 with zero bytes + let cbor_empty_cid = [0xd8, 0x2a, 0x40]; + + let decoded: Result = from_slice(&cbor_empty_cid); + assert!(decoded.is_err()); +} + +#[test] +fn test_cid_non_minimally_encoded() { + let cid = Cid::from_str("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap(); + let cid_encoded = to_vec(&cid).unwrap(); + + let decoded: Cid = from_slice(&cid_encoded).unwrap(); + assert_eq!(decoded, cid); + + // Strip off the CBOR tag. 
+ let without_tag = &cid_encoded[2..]; + + let tag_2_bytes_encoded = [&[0xd9, 0x00, 0x2a], without_tag].concat(); + let tag_2_bytes_decoded: Cid = from_slice(&tag_2_bytes_encoded).unwrap(); + assert_eq!(tag_2_bytes_decoded, cid); + + let tag_4_bytes_encoded = [&[0xda, 0x00, 0x00, 0x00, 0x2a], without_tag].concat(); + let tag_4_bytes_decoded: Cid = from_slice(&tag_4_bytes_encoded).unwrap(); + assert_eq!(tag_4_bytes_decoded, cid); + + let tag_8_bytes_encoded = [ + &[0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a], + without_tag, + ] + .concat(); + let tag_8_bytes_decoded: Cid = from_slice(&tag_8_bytes_encoded).unwrap(); + assert_eq!(tag_8_bytes_decoded, cid); +} + +#[test] +fn test_cid_decode_from_reader() { + let cid_encoded = [ + 0xd8, 0x2a, 0x49, 0x00, 0x01, 0xce, 0x01, 0x9b, 0x01, 0x02, 0x63, 0xc8, + ]; + let cid_decoded: Cid = from_slice(&cid_encoded).unwrap(); + assert_eq!(&cid_encoded[4..], &cid_decoded.to_bytes()); +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/codec.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/codec.rs new file mode 100644 index 000000000000..31463cfcbc3e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/codec.rs @@ -0,0 +1,66 @@ +#![cfg(all(feature = "std", not(feature = "no-cid-as-bytes")))] + +use core::{convert::TryFrom, iter}; + +use ipld_core::{ + cid::Cid, + codec::{Codec, Links}, + ipld, + ipld::Ipld, +}; +use serde_ipld_dagcbor::codec::DagCborCodec; + +#[test] +fn test_codec_encode() { + let data = "hello world!".to_string(); + let expected = b"\x6chello world!"; + + let mut output = Vec::new(); + DagCborCodec::encode(&mut output, &data).unwrap(); + assert_eq!(output, expected); + + let encoded = DagCborCodec::encode_to_vec(&data).unwrap(); + assert_eq!(encoded, expected); +} + +#[test] +fn test_codec_decode() { + let data = b"\x6chello world!"; + let expected = "hello world!".to_string(); + + let decoded: String = DagCborCodec::decode(&data[..]).unwrap(); + assert_eq!(decoded, expected); + + let decoded_from_slice: String = DagCborCodec::decode_from_slice(data).unwrap(); + assert_eq!(decoded_from_slice, expected); +} + +#[test] +fn test_codec_links() { + let cid = Cid::try_from("bafkreibme22gw2h7y2h7tg2fhqotaqjucnbc24deqo72b6mkl2egezxhvy").unwrap(); + let data: Ipld = ipld!({"some": {"nested": cid}, "or": [cid, cid], "foo": true}); + let expected = iter::repeat(cid).take(3).collect::>(); + + let mut encoded = Vec::new(); + DagCborCodec::encode(&mut encoded, &data).unwrap(); + + let links = DagCborCodec::links(&encoded).unwrap(); + assert_eq!(links.collect::>(), expected); +} + +#[test] +fn test_codec_generic() { + fn encode_generic(value: T) -> Vec + where + C: Codec, + C::Error: std::fmt::Debug, + { + C::encode_to_vec(&value).unwrap() + } + + let data = "hello world!".to_string(); + let expected = b"\x6chello world!"; + + let encoded = encode_generic::(data); + assert_eq!(encoded, expected); +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/crash.cbor b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/crash.cbor similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/crash.cbor rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/crash.cbor diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/de.rs 
b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/de.rs new file mode 100644 index 000000000000..e1feaa63ffa1 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/de.rs @@ -0,0 +1,316 @@ +use std::collections::BTreeMap; + +use ipld_core::ipld::Ipld; +use serde_ipld_dagcbor::{de, to_vec, DecodeError}; + +#[test] +fn test_string1() { + let ipld: Result = de::from_slice(&[0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]); + assert_eq!(ipld.unwrap(), Ipld::String("foobar".to_string())); +} + +#[test] +fn test_string2() { + let ipld: Result = de::from_slice(&[ + 0x71, 0x49, 0x20, 0x6d, 0x65, 0x74, 0x20, 0x61, 0x20, 0x74, 0x72, 0x61, 0x76, 0x65, 0x6c, + 0x6c, 0x65, 0x72, + ]); + assert_eq!(ipld.unwrap(), Ipld::String("I met a traveller".to_string())); +} + +#[test] +fn test_string3() { + let slice = b"\x78\x2fI met a traveller from an antique land who said"; + let ipld: Result = de::from_slice(slice); + assert_eq!( + ipld.unwrap(), + Ipld::String("I met a traveller from an antique land who said".to_string()) + ); +} + +#[test] +fn test_byte_string() { + let ipld: Result = de::from_slice(&[0x46, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]); + assert_eq!(ipld.unwrap(), Ipld::Bytes(b"foobar".to_vec())); +} + +#[test] +fn test_numbers1() { + let ipld: Result = de::from_slice(&[0x00]); + assert_eq!(ipld.unwrap(), Ipld::Integer(0)); +} + +#[test] +fn test_numbers2() { + let ipld: Result = de::from_slice(&[0x1a, 0x00, 0xbc, 0x61, 0x4e]); + assert_eq!(ipld.unwrap(), Ipld::Integer(12345678)); +} + +#[test] +fn test_numbers3() { + let ipld: Result = de::from_slice(&[0x39, 0x07, 0xde]); + assert_eq!(ipld.unwrap(), Ipld::Integer(-2015)); +} + +#[test] +fn test_numbers_large_negative() { + let ipld: Result = + de::from_slice(&[0x3b, 0xa5, 0xf7, 0x02, 0xb3, 0xa5, 0xf7, 0x02, 0xb3]); + let expected: i128 = -11959030306112471732; + assert!(expected < i128::from(i64::MIN)); + assert_eq!(ipld.unwrap(), Ipld::Integer(expected)); +} + +#[test] +fn test_bool() { + let ipld: Result = de::from_slice(b"\xf4"); + assert_eq!(ipld.unwrap(), Ipld::Bool(false)); +} + +#[test] +fn test_trailing_bytes() { + let ipld: Result = de::from_slice(b"\xf4trailing"); + assert!(matches!(ipld.unwrap_err(), DecodeError::TrailingData)); +} + +#[test] +fn test_list1() { + let ipld: Result = de::from_slice(b"\x83\x01\x02\x03"); + assert_eq!( + ipld.unwrap(), + Ipld::List(vec![Ipld::Integer(1), Ipld::Integer(2), Ipld::Integer(3)]) + ); +} + +#[test] +fn test_list2() { + let ipld: Result = de::from_slice(b"\x82\x01\x82\x02\x81\x03"); + assert_eq!( + ipld.unwrap(), + Ipld::List(vec![ + Ipld::Integer(1), + Ipld::List(vec![Ipld::Integer(2), Ipld::List(vec![Ipld::Integer(3)])]) + ]) + ); +} + +#[test] +fn test_object() { + let ipld: Result = de::from_slice(b"\xa5aaaAabaBacaCadaDaeaE"); + let mut object = BTreeMap::new(); + object.insert("a".to_string(), Ipld::String("A".to_string())); + object.insert("b".to_string(), Ipld::String("B".to_string())); + object.insert("c".to_string(), Ipld::String("C".to_string())); + object.insert("d".to_string(), Ipld::String("D".to_string())); + object.insert("e".to_string(), Ipld::String("E".to_string())); + assert_eq!(ipld.unwrap(), Ipld::Map(object)); +} + +#[test] +fn test_indefinite_object_error() { + let ipld: Result = de::from_slice(b"\xbfaa\x01ab\x9f\x02\x03\xff\xff"); + let mut object = BTreeMap::new(); + object.insert("a".to_string(), Ipld::Integer(1)); + object.insert( + "b".to_string(), + Ipld::List(vec![Ipld::Integer(2), Ipld::Integer(3)]), + ); + 
assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); +} + +#[test] +fn test_indefinite_list_error() { + let ipld: Result = de::from_slice(b"\x9f\x01\x02\x03\xff"); + assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); +} + +#[test] +fn test_indefinite_string_error() { + let ipld: Result = + de::from_slice(b"\x7f\x65Mary \x64Had \x62a \x67Little \x60\x64Lamb\xff"); + assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); +} + +#[test] +fn test_indefinite_byte_string_error() { + let ipld: Result = de::from_slice(b"\x5f\x42\x01\x23\x42\x45\x67\xff"); + assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); +} + +#[test] +fn test_multiple_indefinite_strings_error() { + let input = b"\x82\x7f\x65Mary \x64Had \x62a \x67Little \x60\x64Lamb\xff\x5f\x42\x01\x23\x42\x45\x67\xff"; + let ipld: Result = de::from_slice(input); + assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); +} + +#[test] +fn test_float() { + let ipld: Result = de::from_slice(b"\xfa\x47\xc3\x50\x00"); + assert_eq!(ipld.unwrap(), Ipld::Float(100000.0)); +} + +#[test] +fn test_rejected_tag() { + let ipld: Result = + de::from_slice(&[0xd9, 0xd9, 0xf7, 0x66, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72]); + assert!(matches!( + ipld.unwrap_err(), + DecodeError::TypeMismatch { + name: "CBOR tag", + byte: 0xf7 + } + )); +} + +#[test] +fn test_crazy_list() { + let slice = b"\x86\x1b\x00\x00\x00\x1c\xbe\x99\x1d\xc7\x3b\x00\x7a\xcf\x51\xdc\x51\x70\xdb\x3a\x1b\x3a\x06\xdd\xf5\xf6\xfb\x41\x76\x5e\xb1\xf8\x00\x00\x00"; + let ipld: Vec = de::from_slice(slice).unwrap(); + assert_eq!( + ipld, + vec![ + Ipld::Integer(123456789959), + Ipld::Integer(-34567897654325468), + Ipld::Integer(-456787678), + Ipld::Bool(true), + Ipld::Null, + Ipld::Float(23456543.5), + ] + ); +} + +#[test] +fn test_nan() { + let ipld: Result = de::from_slice(b"\xf9\x7e\x00"); + assert!(matches!( + ipld.unwrap_err(), + DecodeError::TypeMismatch { .. } + )); +} + +#[test] +// The file was reported as not working by user kie0tauB +// but it parses to a cbor value. 
+fn test_kietaub_file() { + let file = include_bytes!("kietaub.cbor"); + let value_result: Result = de::from_slice(file); + value_result.unwrap(); +} + +#[test] +fn test_option_roundtrip() { + let obj1 = Some(10u32); + + let v = to_vec(&obj1).unwrap(); + let obj2: Result, _> = de::from_slice(&v[..]); + println!("{:?}", obj2); + + assert_eq!(obj1, obj2.unwrap()); +} + +#[test] +fn test_option_none_roundtrip() { + let obj1 = None; + + let v = to_vec(&obj1).unwrap(); + println!("{:?}", v); + let obj2: Result, _> = de::from_slice(&v[..]); + + assert_eq!(obj1, obj2.unwrap()); +} + +#[test] +fn test_unit() { + #[allow(clippy::let_unit_value)] + let unit = (); + let v = to_vec(&unit).unwrap(); + assert_eq!(v, [0xf6], "unit is serialized as NULL."); + let result: Result<(), _> = from_slice(&v); + assert!(result.is_ok(), "unit was successfully deserialized"); +} + +#[test] +fn test_variable_length_map_error() { + let slice = b"\xbf\x67\x6d\x65\x73\x73\x61\x67\x65\x64\x70\x6f\x6e\x67\xff"; + let ipld: Result = de::from_slice(slice); + assert!(matches!(ipld.unwrap_err(), DecodeError::IndefiniteSize)); +} + +#[test] +fn test_object_determinism_roundtrip() { + let expected = b"\xa2aa\x01ab\x82\x02\x03"; + + // 0.1% chance of not catching failure + for _ in 0..10 { + assert_eq!( + &to_vec(&de::from_slice::(expected).unwrap()).unwrap(), + expected + ); + } +} + +#[test] +fn crash() { + let file = include_bytes!("crash.cbor"); + let value_result: Result = de::from_slice(file); + assert!(matches!(value_result.unwrap_err(), DecodeError::Eof)); +} + +use serde_ipld_dagcbor::de::from_slice; +use std::net::{IpAddr, Ipv4Addr}; +#[test] +fn test_ipaddr_deserialization() { + let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let buf = to_vec(&ip).unwrap(); + let deserialized_ip = from_slice::(&buf).unwrap(); + assert_eq!(ip, deserialized_ip); +} + +#[test] +fn attempt_stack_overflow() { + // Create a tag 17, followed by 999 more tag 17: + // 17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(17(... + // This causes deep recursion in the decoder and may + // exhaust the stack and therfore result in a stack overflow. + let input = vec![0xd1; 1000]; + serde_ipld_dagcbor::from_slice::(&input).expect_err("recursion limit"); +} + +#[test] +fn truncated_object() { + let input: Vec = [ + &b"\x84\x87\xD8\x2A\x58\x27\x00\x01\x71\xA0\xE4\x02\x20\x83\xEC\x9F\x76\x1D"[..], + &b"\xB5\xEE\xA0\xC8\xE1\xB5\x74\x0D\x1F\x0A\x1D\xB1\x8A\x52\x6B\xCB\x42\x69"[..], + &b"\xFD\x99\x24\x9E\xCE\xA9\xE8\xFD\x24\xD8\x2A\x58\x27\x00\x01\x71\xA0\xE4"[..], + &b"\x02\x20\xF1\x9B\xC1\x42\x83\x31\xB1\x39\xB3\x3F\x43\x02\x87\xCC\x1C\x12"[..], + &b"\xF2\x84\x47\xA3\x9B\x07\x59\x40\x17\x68\xFE\xE8\x09\xBB\xF2\x54\xD8\x2A"[..], + &b"\x58\x27\x00\x01\x71\xA0\xE4\x02\x20\xB0\x75\x09\x92\x78\x6B\x6B\x4C\xED"[..], + &b"\xF0\xE1\x50\xA3\x1C\xAB\xDF\x25\xA9\x26\x8C\x63\xDD\xCB\x25\x73\x6B\xF5"[..], + &b"\x8D\xE8\xA4\x24\x29"[..], + ] + .concat(); + serde_ipld_dagcbor::from_slice::(&input).expect_err("truncated"); +} + +#[test] +fn invalid_string() { + // Non UTF-8 byte sequence, but using major type 3 (text string) + let input = [0x63, 0xc5, 0x01, 0x02]; + let result = serde_ipld_dagcbor::from_slice::(&input); + assert!(matches!( + result.unwrap_err(), + DecodeError::InvalidUtf8 { .. } + )); +} + +#[test] +fn error_on_undefined() { + // CBOR smple type `undefined` + let input = [0xf7]; + let result = serde_ipld_dagcbor::from_slice::(&input); + assert!(matches!( + result.unwrap_err(), + DecodeError::Unsupported { .. 
} + )); +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/enum.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/enum.rs similarity index 98% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/enum.rs rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/enum.rs index 3c5c8505d61b..c44bdfbfc681 100644 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/enum.rs +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/enum.rs @@ -1,4 +1,4 @@ -use serde::{Deserialize, Serialize}; +use serde_derive::{Deserialize, Serialize}; use serde_ipld_dagcbor::{from_slice, to_vec, DecodeError}; diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/ipld.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/ipld.rs new file mode 100644 index 000000000000..37dc39628704 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/ipld.rs @@ -0,0 +1,85 @@ +use std::collections::BTreeMap; + +use ipld_core::ipld::Ipld; +use serde_derive::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +struct TupleStruct(String, i32, u64); + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +struct UnitStruct; + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +struct Struct<'a> { + tuple_struct: TupleStruct, + tuple: (String, f32, f64), + map: BTreeMap, + bytes: &'a [u8], + array: Vec, +} + +use std::iter::FromIterator; + +#[allow(clippy::useless_format)] +#[test] +fn serde() { + let tuple_struct = TupleStruct(format!("test"), -60, 3000); + + let tuple = (format!("hello"), -50.004097, -12.094635556478); + + let map = BTreeMap::from_iter( + [ + (format!("key1"), format!("value1")), + (format!("key2"), format!("value2")), + (format!("key3"), format!("value3")), + (format!("key4"), format!("value4")), + ] + .iter() + .cloned(), + ); + + let bytes = b"test byte string"; + + let array = vec![format!("one"), format!("two"), format!("three")]; + + let data = Struct { + tuple_struct, + tuple, + map, + bytes, + array, + }; + + let ipld = ipld_core::serde::to_ipld(data.clone()).unwrap(); + println!("{:?}", ipld); + + let data_ser = serde_ipld_dagcbor::to_vec(&ipld).unwrap(); + let data_de_ipld: Ipld = serde_ipld_dagcbor::from_slice(&data_ser).unwrap(); + + fn as_object(ipld: &Ipld) -> &BTreeMap { + if let Ipld::Map(ref v) = ipld { + return v; + } + panic!() + } + + for ((k1, v1), (k2, v2)) in as_object(&ipld).iter().zip(as_object(&data_de_ipld).iter()) { + assert_eq!(k1, k2); + assert_eq!(v1, v2); + } + + assert_eq!(ipld, data_de_ipld); +} + +#[test] +fn unit_struct_not_supported() { + let unit_array = vec![UnitStruct, UnitStruct, UnitStruct]; + let ipld = ipld_core::serde::to_ipld(unit_array); + assert!(ipld.is_err()); +} + +#[derive(Debug, Deserialize, Serialize)] +struct SmallStruct { + spam: u32, + eggs: u32, +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/kietaub.cbor b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/kietaub.cbor similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/kietaub.cbor rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/kietaub.cbor diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/ser.rs 
b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/ser.rs new file mode 100644 index 000000000000..b52b38b776e6 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/ser.rs @@ -0,0 +1,245 @@ +use std::{collections::BTreeMap, iter}; + +use serde::de::value::{self, MapDeserializer, SeqDeserializer}; +use serde_bytes::{ByteBuf, Bytes}; +use serde_derive::Serialize; +use serde_ipld_dagcbor::{ + from_slice, + ser::{BufWriter, Serializer}, + to_vec, +}; + +#[test] +fn test_string() { + let value = "foobar".to_owned(); + assert_eq!(&to_vec(&value).unwrap()[..], b"ffoobar"); +} + +#[test] +fn test_list() { + let value = vec![1, 2, 3]; + assert_eq!(&to_vec(&value).unwrap()[..], b"\x83\x01\x02\x03"); +} + +#[test] +fn test_object() { + let mut object = BTreeMap::new(); + object.insert("a".to_owned(), "A".to_owned()); + object.insert("b".to_owned(), "B".to_owned()); + object.insert("c".to_owned(), "C".to_owned()); + object.insert("d".to_owned(), "D".to_owned()); + object.insert("e".to_owned(), "E".to_owned()); + let vec = to_vec(&object).unwrap(); + let test_object = from_slice(&vec[..]).unwrap(); + assert_eq!(object, test_object); +} + +#[test] +fn test_float() { + let vec = to_vec(&12.3f64).unwrap(); + assert_eq!(vec, b"\xfb@(\x99\x99\x99\x99\x99\x9a"); +} + +#[test] +fn test_f32() { + let vec = to_vec(&4000.5f32).unwrap(); + assert_eq!(vec, b"\xfb\x40\xaf\x41\x00\x00\x00\x00\x00"); +} + +#[test] +fn test_infinity() { + let vec = to_vec(&::std::f64::INFINITY); + assert!(vec.is_err(), "Only finite numbers are supported."); +} + +#[test] +fn test_neg_infinity() { + let vec = to_vec(&::std::f64::NEG_INFINITY); + assert!(vec.is_err(), "Only finite numbers are supported."); +} + +#[test] +fn test_nan() { + let vec = to_vec(&::std::f32::NAN); + assert!(vec.is_err(), "Only finite numbers are supported."); +} + +#[test] +fn test_integer() { + // u8 + let vec = to_vec(&24).unwrap(); + assert_eq!(vec, b"\x18\x18"); + // i8 + let vec = to_vec(&-5).unwrap(); + assert_eq!(vec, b"\x24"); + // i16 + let vec = to_vec(&-300).unwrap(); + assert_eq!(vec, b"\x39\x01\x2b"); + // i32 + let vec = to_vec(&-23567997).unwrap(); + assert_eq!(vec, b"\x3a\x01\x67\x9e\x7c"); + // u64 + let vec = to_vec(&::std::u64::MAX).unwrap(); + assert_eq!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff"); + // u128 within u64 range + let vec = to_vec(&(u64::MAX as u128)).unwrap(); + assert_eq!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff"); + // u128 out of range + assert!(to_vec(&(u64::MAX as u128 + 1)).is_err()); + // i128 within u64 range + let vec = to_vec(&(u64::MAX as i128)).unwrap(); + assert_eq!(vec, b"\x1b\xff\xff\xff\xff\xff\xff\xff\xff"); + // i128 within -u64 range + let vec = to_vec(&(-(u64::MAX as i128))).unwrap(); + assert_eq!(vec, b"\x3B\xff\xff\xff\xff\xff\xff\xff\xfe"); + // minimum CBOR integer value + let vec = to_vec(&(-(u64::MAX as i128 + 1))).unwrap(); + assert_eq!(vec, b"\x3B\xff\xff\xff\xff\xff\xff\xff\xff"); + // i128 out of -u64 range + assert!(to_vec(&i128::MIN).is_err()); +} + +#[test] +fn test_ip_addr() { + use std::net::Ipv4Addr; + + let addr = Ipv4Addr::new(8, 8, 8, 8); + let vec = to_vec(&addr).unwrap(); + println!("{:?}", vec); + assert_eq!(vec.len(), 5); + let test_addr: Ipv4Addr = from_slice(&vec).unwrap(); + assert_eq!(addr, test_addr); +} + +/// Test all of CBOR's fixed-length byte string types +#[test] +fn test_byte_string() { + // Very short byte strings have 1-byte headers + let short = serde_bytes::Bytes::new(&[0, 1, 2, 255]); + let short_s 
= to_vec(&short).unwrap(); + assert_eq!(short_s, [0x44, 0, 1, 2, 255]); + + // byte strings > 23 bytes have 2-byte headers + let medium = Bytes::new(&[ + 0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 255, + ]); + let medium_s = to_vec(&medium).unwrap(); + assert_eq!( + medium_s, + [ + 0x58, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 255 + ] + ); + + // byte strings > 256 bytes have 3-byte headers + let long_vec = ByteBuf::from((0..256).map(|i| (i & 0xFF) as u8).collect::>()); + let long_s = to_vec(&long_vec).unwrap(); + assert_eq!(&long_s[0..3], [0x59, 1, 0]); + assert_eq!(&long_s[3..], &long_vec[..]); + + // byte strings > 2^16 bytes have 5-byte headers + let very_long_vec = ByteBuf::from((0..65536).map(|i| (i & 0xFF) as u8).collect::>()); + let very_long_s = to_vec(&very_long_vec).unwrap(); + assert_eq!(&very_long_s[0..5], [0x5a, 0, 1, 0, 0]); + assert_eq!(&very_long_s[5..], &very_long_vec[..]); + + // byte strings > 2^32 bytes have 9-byte headers, but they take too much RAM + // to test in Travis. +} + +/// This test checks that the keys of a map are sorted correctly, independently of the order of the +/// input. +#[test] +fn test_key_order_transcode_map() { + // CBOR encoded {"a": 1, "b": 2} + let expected = [0xa2, 0x61, 0x61, 0x01, 0x61, 0x62, 0x02]; + + let data = vec![("b", 2), ("a", 1)]; + let deserializer: MapDeserializer<'_, _, value::Error> = MapDeserializer::new(data.into_iter()); + let writer = BufWriter::new(Vec::new()); + let mut serializer = Serializer::new(writer); + serde_transcode::transcode(deserializer, &mut serializer).unwrap(); + let result = serializer.into_inner().into_inner(); + assert_eq!(result, expected); +} + +// This test makes sure that even unbound lists are not encoded as such (as lists in DAG-CBOR need +// to be finite). +#[test] +fn test_non_unbound_list() { + // Create an iterator that has no size hint. This would trigger the "unbounded code path" for + // sequences. + let one_two_three_iter = iter::successors( + Some(1), + move |&num| { + if num < 3 { + Some(num + 1) + } else { + None + } + }, + ); + + // CBOR encoded [1, 2, 3] + let expected = [0x83, 0x01, 0x02, 0x03]; + + let deserializer: SeqDeserializer<_, value::Error> = SeqDeserializer::new(one_two_three_iter); + let writer = BufWriter::new(Vec::new()); + let mut serializer = Serializer::new(writer); + serde_transcode::transcode(deserializer, &mut serializer).unwrap(); + let result = serializer.into_inner().into_inner(); + assert_eq!(result, expected); +} + +#[test] +fn test_struct_canonical() { + #[derive(Serialize)] + struct First { + a: u32, + b: u32, + } + #[derive(Serialize)] + struct Second { + b: u32, + a: u32, + } + + let first = First { a: 1, b: 2 }; + let second = Second { a: 1, b: 2 }; + + let first_bytes = serde_ipld_dagcbor::to_vec(&first).unwrap(); + let second_bytes = serde_ipld_dagcbor::to_vec(&second).unwrap(); + + assert_eq!(first_bytes, second_bytes); + // Do not only make sure that the order is the same, but also that it's correct. + assert_eq!(first_bytes, b"\xa2\x61a\x01\x61b\x02") +} + +#[test] +fn test_struct_variant_canonical() { + // The `abc` is there to make sure it really follows the DAG-CBOR sorting order, which sorts by + // length of the keys first, then lexicographically. It means that `abc` sorts *after* `b`. 
+ #[derive(Serialize)] + enum First { + Data { a: u8, b: u8, abc: u8 }, + } + + #[derive(Serialize)] + enum Second { + Data { b: u8, abc: u8, a: u8 }, + } + + let first = First::Data { a: 1, b: 2, abc: 3 }; + let second = Second::Data { a: 1, b: 2, abc: 3 }; + + let first_bytes = serde_ipld_dagcbor::to_vec(&first).unwrap(); + let second_bytes = serde_ipld_dagcbor::to_vec(&second).unwrap(); + + assert_eq!(first_bytes, second_bytes); + // Do not only make sure that the order is the same, but also that it's correct. + assert_eq!( + first_bytes, + b"\xa1\x64Data\xa3\x61a\x01\x61b\x02\x63abc\x03" + ) +} diff --git a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/std_types.rs b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/std_types.rs similarity index 95% rename from third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/std_types.rs rename to third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/std_types.rs index 08e1793487b4..a26ecffd3dca 100644 --- a/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.4.2/tests/std_types.rs +++ b/third_party/rust/chromium_crates_io/vendor/serde_ipld_dagcbor-0.6.1/tests/std_types.rs @@ -1,4 +1,4 @@ -use serde::{Deserialize, Serialize}; +use serde_derive::{Deserialize, Serialize}; use serde_ipld_dagcbor::{from_slice, to_vec}; @@ -100,7 +100,7 @@ testcase!(test_person_struct, year_of_birth: 1906, profession: Some("computer scientist".to_string()), }, - "a3646e616d656c477261636520486f707065726d796561725f6f665f62697274681907726a70726f66657373696f6e72636f6d707574657220736369656e74697374"); + "a3646e616d656c477261636520486f707065726a70726f66657373696f6e72636f6d707574657220736369656e746973746d796561725f6f665f6269727468190772"); #[derive(Debug, PartialEq, Deserialize, Serialize)] struct OptionalPerson { diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/.cargo_vcs_info.json deleted file mode 100644 index cdb256b92615..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/.cargo_vcs_info.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "git": { - "sha1": "b4b070c3faf87cb8f324bd0ed0a5e5ec32d3a5b0" - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/CHANGELOG.md deleted file mode 100644 index 152e7bcac621..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/CHANGELOG.md +++ /dev/null @@ -1,59 +0,0 @@ -# Changelog - -Entries are listed in reverse chronological order. - -## 2.4.1 - -* Fix a bug in how the README was included in the documentation builds - which caused nightly builds to break. - -## 2.4.0 - -* Add new `ConstantTimeGreater` and `ConstantTimeLess` traits, as well - as implementations for unsigned integers, by @isislovecruft. - -## 2.3.0 - -* Add `impl ConstantTimeEq for Choice` by @tarcieri. -* Add `impl From> for Option` by @CPerezz. This is useful for - handling library code that produces `CtOption`s in contexts where timing - doesn't matter. -* Introduce an MSRV policy. - -## 2.2.3 - -* Remove the `nightly`-only asm-based `black_box` barrier in favor of the - volatile-based one, fixing compilation on current nightlies. - -## 2.2.2 - -* Update README.md to clarify that 2.2 and above do not require the `nightly` - feature. - -## 2.2.1 - -* Adds an `or_else` combinator for `CtOption`, by @ebfull. 
-* Optimized `black_box` for `nightly`, by @jethrogb. -* Optimized `black_box` for `stable`, by @dsprenkels. -* Fixed CI for `no_std`, by @dsprenkels. -* Fixed fuzz target compilation, by @3for. - -## 2.2.0 - -* Error during `cargo publish`, yanked. - -## 2.1.1 - -* Adds the "crypto" tag to crate metadata. -* New shorter, more efficient ct_eq() for integers, contributed by Thomas Pornin. - -## 2.1.0 - -* Adds a new `CtOption` which acts as a constant-time `Option` - (thanks to @ebfull for the implementation). -* `Choice` now itself implements `ConditionallySelectable`. - -## 2.0.0 - -* Stable version with traits reworked from 1.0.0 to interact better - with the orphan rules. diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/Cargo.toml deleted file mode 100644 index 8689ad2f78a6..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "subtle" -version = "2.4.1" -authors = ["Isis Lovecruft ", "Henry de Valence "] -exclude = ["**/.gitignore", ".travis.yml"] -description = "Pure-Rust traits and utilities for constant-time cryptographic implementations." -homepage = "https://dalek.rs/" -documentation = "https://docs.rs/subtle" -readme = "README.md" -keywords = ["cryptography", "crypto", "constant-time", "utilities"] -categories = ["cryptography", "no-std"] -license = "BSD-3-Clause" -repository = "https://github.com/dalek-cryptography/subtle" -[dev-dependencies.rand] -version = "0.7" - -[features] -default = ["std", "i128"] -i128 = [] -nightly = [] -std = [] -[badges.travis-ci] -branch = "master" -repository = "dalek-cryptography/subtle" diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/Cargo.toml.orig deleted file mode 100644 index 1c850348cfba..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/Cargo.toml.orig +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "subtle" -# Before incrementing: -# - update CHANGELOG -# - update html_root_url -# - update README if necessary by semver -# - if any updates were made to the README, also update the module documentation in src/lib.rs -version = "2.4.1" -authors = ["Isis Lovecruft ", - "Henry de Valence "] -readme = "README.md" -license = "BSD-3-Clause" -repository = "https://github.com/dalek-cryptography/subtle" -homepage = "https://dalek.rs/" -documentation = "https://docs.rs/subtle" -categories = ["cryptography", "no-std"] -keywords = ["cryptography", "crypto", "constant-time", "utilities"] -description = "Pure-Rust traits and utilities for constant-time cryptographic implementations." 
-exclude = [ - "**/.gitignore", - ".travis.yml", -] - -[badges] -travis-ci = { repository = "dalek-cryptography/subtle", branch = "master"} - -[dev-dependencies] -rand = { version = "0.7" } - -[features] -default = ["std", "i128"] -std = [] -i128 = [] -# DEPRECATED: As of 2.4.1, this feature does nothing. -nightly = [] diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/LICENSE b/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/LICENSE deleted file mode 100644 index 927c02c80b9a..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2016-2017 Isis Agora Lovecruft, Henry de Valence. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/README.md b/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/README.md deleted file mode 100644 index 81ac448fe34e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# subtle [![](https://img.shields.io/crates/v/subtle.svg)](https://crates.io/crates/subtle) [![](https://img.shields.io/badge/dynamic/json.svg?label=docs&uri=https%3A%2F%2Fcrates.io%2Fapi%2Fv1%2Fcrates%2Fsubtle%2Fversions&query=%24.versions%5B0%5D.num&colorB=4F74A6)](https://doc.dalek.rs/subtle) [![](https://travis-ci.org/dalek-cryptography/subtle.svg?branch=master)](https://travis-ci.org/dalek-cryptography/subtle) - -**Pure-Rust traits and utilities for constant-time cryptographic implementations.** - -It consists of a `Choice` type, and a collection of traits using `Choice` -instead of `bool` which are intended to execute in constant-time. The `Choice` -type is a wrapper around a `u8` that holds a `0` or `1`. - -```toml -subtle = "2.4" -``` - -This crate represents a “best-effort” attempt, since side-channels -are ultimately a property of a deployed cryptographic system -including the hardware it runs on, not just of software. 
- -The traits are implemented using bitwise operations, and should execute in -constant time provided that a) the bitwise operations are constant-time and -b) the bitwise operations are not recognized as a conditional assignment and -optimized back into a branch. - -For a compiler to recognize that bitwise operations represent a conditional -assignment, it needs to know that the value used to generate the bitmasks is -really a boolean `i1` rather than an `i8` byte value. In an attempt to -prevent this refinement, the crate tries to hide the value of a `Choice`'s -inner `u8` by passing it through a volatile read. For more information, see -the _About_ section below. - -Versions prior to `2.2` recommended use of the `nightly` feature to enable an -optimization barrier; this is not required in versions `2.2` and above. - -Note: the `subtle` crate contains `debug_assert`s to check invariants during -debug builds. These invariant checks involve secret-dependent branches, and -are not present when compiled in release mode. This crate is intended to be -used in release mode. - -## Documentation - -Documentation is available [here][docs]. - -## Minimum Supported Rust Version - -Rust **1.41** or higher. - -Minimum supported Rust version can be changed in the future, but it will be done with a minor version bump. - -## About - -This library aims to be the Rust equivalent of Go’s `crypto/subtle` module. - -The optimization barrier in `impl From for Choice` was based on Tim -Maclean's [work on `rust-timing-shield`][rust-timing-shield], which attempts to -provide a more comprehensive approach for preventing software side-channels in -Rust code. - -`subtle` is authored by isis agora lovecruft and Henry de Valence. - -## Warning - -This code is a low-level library, intended for specific use-cases implementing -cryptographic protocols. It represents a best-effort attempt to protect -against some software side-channels. Because side-channel resistance is not a -property of software alone, but of software together with hardware, any such -effort is fundamentally limited. - -**USE AT YOUR OWN RISK** - -[docs]: https://docs.rs/subtle -[rust-timing-shield]: https://www.chosenplaintext.ca/open-source/rust-timing-shield/security diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/src/lib.rs deleted file mode 100644 index 27d05eeaef63..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/src/lib.rs +++ /dev/null @@ -1,864 +0,0 @@ -// -*- mode: rust; -*- -// -// This file is part of subtle, part of the dalek cryptography project. -// Copyright (c) 2016-2018 isis lovecruft, Henry de Valence -// See LICENSE for licensing information. -// -// Authors: -// - isis agora lovecruft -// - Henry de Valence - -#![no_std] -#![deny(missing_docs)] -#![doc(html_logo_url = "https://doc.dalek.rs/assets/dalek-logo-clear.png")] -#![doc(html_root_url = "https://docs.rs/subtle/2.4.1")] - -//! # subtle [![](https://img.shields.io/crates/v/subtle.svg)](https://crates.io/crates/subtle) [![](https://img.shields.io/badge/dynamic/json.svg?label=docs&uri=https%3A%2F%2Fcrates.io%2Fapi%2Fv1%2Fcrates%2Fsubtle%2Fversions&query=%24.versions%5B0%5D.num&colorB=4F74A6)](https://doc.dalek.rs/subtle) [![](https://travis-ci.org/dalek-cryptography/subtle.svg?branch=master)](https://travis-ci.org/dalek-cryptography/subtle) -//! -//! **Pure-Rust traits and utilities for constant-time cryptographic implementations.** -//! -//! 
It consists of a `Choice` type, and a collection of traits using `Choice` -//! instead of `bool` which are intended to execute in constant-time. The `Choice` -//! type is a wrapper around a `u8` that holds a `0` or `1`. -//! -//! ```toml -//! subtle = "2.4" -//! ``` -//! -//! This crate represents a “best-effort” attempt, since side-channels -//! are ultimately a property of a deployed cryptographic system -//! including the hardware it runs on, not just of software. -//! -//! The traits are implemented using bitwise operations, and should execute in -//! constant time provided that a) the bitwise operations are constant-time and -//! b) the bitwise operations are not recognized as a conditional assignment and -//! optimized back into a branch. -//! -//! For a compiler to recognize that bitwise operations represent a conditional -//! assignment, it needs to know that the value used to generate the bitmasks is -//! really a boolean `i1` rather than an `i8` byte value. In an attempt to -//! prevent this refinement, the crate tries to hide the value of a `Choice`'s -//! inner `u8` by passing it through a volatile read. For more information, see -//! the _About_ section below. -//! -//! Versions prior to `2.2` recommended use of the `nightly` feature to enable an -//! optimization barrier; this is not required in versions `2.2` and above. -//! -//! Note: the `subtle` crate contains `debug_assert`s to check invariants during -//! debug builds. These invariant checks involve secret-dependent branches, and -//! are not present when compiled in release mode. This crate is intended to be -//! used in release mode. -//! -//! ## Documentation -//! -//! Documentation is available [here][docs]. -//! -//! ## Minimum Supported Rust Version -//! -//! Rust **1.41** or higher. -//! -//! Minimum supported Rust version can be changed in the future, but it will be done with a minor version bump. -//! -//! ## About -//! -//! This library aims to be the Rust equivalent of Go’s `crypto/subtle` module. -//! -//! The optimization barrier in `impl From for Choice` was based on Tim -//! Maclean's [work on `rust-timing-shield`][rust-timing-shield], which attempts to -//! provide a more comprehensive approach for preventing software side-channels in -//! Rust code. -//! -//! `subtle` is authored by isis agora lovecruft and Henry de Valence. -//! -//! ## Warning -//! -//! This code is a low-level library, intended for specific use-cases implementing -//! cryptographic protocols. It represents a best-effort attempt to protect -//! against some software side-channels. Because side-channel resistance is not a -//! property of software alone, but of software together with hardware, any such -//! effort is fundamentally limited. -//! -//! **USE AT YOUR OWN RISK** -//! -//! [docs]: https://docs.rs/subtle -//! [rust-timing-shield]: https://www.chosenplaintext.ca/open-source/rust-timing-shield/security - -#[cfg(feature = "std")] -#[macro_use] -extern crate std; - -use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Neg, Not}; -use core::option::Option; - -/// The `Choice` struct represents a choice for use in conditional assignment. -/// -/// It is a wrapper around a `u8`, which should have the value either `1` (true) -/// or `0` (false). -/// -/// The conversion from `u8` to `Choice` passes the value through an optimization -/// barrier, as a best-effort attempt to prevent the compiler from inferring that -/// the `Choice` value is a boolean. 
This strategy is based on Tim Maclean's -/// [work on `rust-timing-shield`][rust-timing-shield], which attempts to provide -/// a more comprehensive approach for preventing software side-channels in Rust -/// code. -/// -/// The `Choice` struct implements operators for AND, OR, XOR, and NOT, to allow -/// combining `Choice` values. These operations do not short-circuit. -/// -/// [rust-timing-shield]: -/// https://www.chosenplaintext.ca/open-source/rust-timing-shield/security -#[derive(Copy, Clone, Debug)] -pub struct Choice(u8); - -impl Choice { - /// Unwrap the `Choice` wrapper to reveal the underlying `u8`. - /// - /// # Note - /// - /// This function only exists as an **escape hatch** for the rare case - /// where it's not possible to use one of the `subtle`-provided - /// trait impls. - /// - /// **To convert a `Choice` to a `bool`, use the `From` implementation instead.** - #[inline] - pub fn unwrap_u8(&self) -> u8 { - self.0 - } -} - -impl From for bool { - /// Convert the `Choice` wrapper into a `bool`, depending on whether - /// the underlying `u8` was a `0` or a `1`. - /// - /// # Note - /// - /// This function exists to avoid having higher-level cryptographic protocol - /// implementations duplicating this pattern. - /// - /// The intended use case for this conversion is at the _end_ of a - /// higher-level primitive implementation: for example, in checking a keyed - /// MAC, where the verification should happen in constant-time (and thus use - /// a `Choice`) but it is safe to return a `bool` at the end of the - /// verification. - #[inline] - fn from(source: Choice) -> bool { - debug_assert!((source.0 == 0u8) | (source.0 == 1u8)); - source.0 != 0 - } -} - -impl BitAnd for Choice { - type Output = Choice; - #[inline] - fn bitand(self, rhs: Choice) -> Choice { - (self.0 & rhs.0).into() - } -} - -impl BitAndAssign for Choice { - #[inline] - fn bitand_assign(&mut self, rhs: Choice) { - *self = *self & rhs; - } -} - -impl BitOr for Choice { - type Output = Choice; - #[inline] - fn bitor(self, rhs: Choice) -> Choice { - (self.0 | rhs.0).into() - } -} - -impl BitOrAssign for Choice { - #[inline] - fn bitor_assign(&mut self, rhs: Choice) { - *self = *self | rhs; - } -} - -impl BitXor for Choice { - type Output = Choice; - #[inline] - fn bitxor(self, rhs: Choice) -> Choice { - (self.0 ^ rhs.0).into() - } -} - -impl BitXorAssign for Choice { - #[inline] - fn bitxor_assign(&mut self, rhs: Choice) { - *self = *self ^ rhs; - } -} - -impl Not for Choice { - type Output = Choice; - #[inline] - fn not(self) -> Choice { - (1u8 & (!self.0)).into() - } -} - -/// This function is a best-effort attempt to prevent the compiler from knowing -/// anything about the value of the returned `u8`, other than its type. -/// -/// Because we want to support stable Rust, we don't have access to inline -/// assembly or test::black_box, so we use the fact that volatile values will -/// never be elided to register values. -/// -/// Note: Rust's notion of "volatile" is subject to change over time. While this -/// code may break in a non-destructive way in the future, “constant-time” code -/// is a continually moving target, and this is better than doing nothing. -#[inline(never)] -fn black_box(input: u8) -> u8 { - debug_assert!((input == 0u8) | (input == 1u8)); - - unsafe { - // Optimization barrier - // - // Unsafe is ok, because: - // - &input is not NULL; - // - size of input is not zero; - // - u8 is neither Sync, nor Send; - // - u8 is Copy, so input is always live; - // - u8 type is always properly aligned. 
- core::ptr::read_volatile(&input as *const u8) - } -} - -impl From for Choice { - #[inline] - fn from(input: u8) -> Choice { - // Our goal is to prevent the compiler from inferring that the value held inside the - // resulting `Choice` struct is really an `i1` instead of an `i8`. - Choice(black_box(input)) - } -} - -/// An `Eq`-like trait that produces a `Choice` instead of a `bool`. -/// -/// # Example -/// -/// ``` -/// use subtle::ConstantTimeEq; -/// let x: u8 = 5; -/// let y: u8 = 13; -/// -/// assert_eq!(x.ct_eq(&y).unwrap_u8(), 0); -/// assert_eq!(x.ct_eq(&x).unwrap_u8(), 1); -/// ``` -pub trait ConstantTimeEq { - /// Determine if two items are equal. - /// - /// The `ct_eq` function should execute in constant time. - /// - /// # Returns - /// - /// * `Choice(1u8)` if `self == other`; - /// * `Choice(0u8)` if `self != other`. - #[inline] - fn ct_eq(&self, other: &Self) -> Choice; -} - -impl ConstantTimeEq for [T] { - /// Check whether two slices of `ConstantTimeEq` types are equal. - /// - /// # Note - /// - /// This function short-circuits if the lengths of the input slices - /// are different. Otherwise, it should execute in time independent - /// of the slice contents. - /// - /// Since arrays coerce to slices, this function works with fixed-size arrays: - /// - /// ``` - /// # use subtle::ConstantTimeEq; - /// # - /// let a: [u8; 8] = [0,1,2,3,4,5,6,7]; - /// let b: [u8; 8] = [0,1,2,3,0,1,2,3]; - /// - /// let a_eq_a = a.ct_eq(&a); - /// let a_eq_b = a.ct_eq(&b); - /// - /// assert_eq!(a_eq_a.unwrap_u8(), 1); - /// assert_eq!(a_eq_b.unwrap_u8(), 0); - /// ``` - #[inline] - fn ct_eq(&self, _rhs: &[T]) -> Choice { - let len = self.len(); - - // Short-circuit on the *lengths* of the slices, not their - // contents. - if len != _rhs.len() { - return Choice::from(0); - } - - // This loop shouldn't be shortcircuitable, since the compiler - // shouldn't be able to reason about the value of the `u8` - // unwrapped from the `ct_eq` result. - let mut x = 1u8; - for (ai, bi) in self.iter().zip(_rhs.iter()) { - x &= ai.ct_eq(bi).unwrap_u8(); - } - - x.into() - } -} - -impl ConstantTimeEq for Choice { - #[inline] - fn ct_eq(&self, rhs: &Choice) -> Choice { - !(*self ^ *rhs) - } -} - -/// Given the bit-width `$bit_width` and the corresponding primitive -/// unsigned and signed types `$t_u` and `$t_i` respectively, generate -/// an `ConstantTimeEq` implementation. -macro_rules! generate_integer_equal { - ($t_u:ty, $t_i:ty, $bit_width:expr) => { - impl ConstantTimeEq for $t_u { - #[inline] - fn ct_eq(&self, other: &$t_u) -> Choice { - // x == 0 if and only if self == other - let x: $t_u = self ^ other; - - // If x == 0, then x and -x are both equal to zero; - // otherwise, one or both will have its high bit set. - let y: $t_u = (x | x.wrapping_neg()) >> ($bit_width - 1); - - // Result is the opposite of the high bit (now shifted to low). - ((y ^ (1 as $t_u)) as u8).into() - } - } - impl ConstantTimeEq for $t_i { - #[inline] - fn ct_eq(&self, other: &$t_i) -> Choice { - // Bitcast to unsigned and call that implementation. - (*self as $t_u).ct_eq(&(*other as $t_u)) - } - } - }; -} - -generate_integer_equal!(u8, i8, 8); -generate_integer_equal!(u16, i16, 16); -generate_integer_equal!(u32, i32, 32); -generate_integer_equal!(u64, i64, 64); -#[cfg(feature = "i128")] -generate_integer_equal!(u128, i128, 128); -generate_integer_equal!(usize, isize, ::core::mem::size_of::() * 8); - -/// A type which can be conditionally selected in constant time. 
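
The `generate_integer_equal!` macro above turns `==` into branch-free bit arithmetic; the following sketch writes the same trick out for `u32` as a re-derivation for illustration, not code taken from this patch.

```rust
/// Branch-free equality for u32, mirroring the technique used by
/// `generate_integer_equal!`: the result is 1 exactly when `a ^ b == 0`.
fn ct_eq_u32(a: u32, b: u32) -> u8 {
    let x = a ^ b;                        // zero iff a == b
    let y = (x | x.wrapping_neg()) >> 31; // high bit set iff x != 0, shifted down
    (y ^ 1) as u8                         // invert: 1 for equal, 0 for unequal
}

fn main() {
    assert_eq!(ct_eq_u32(5, 5), 1);
    assert_eq!(ct_eq_u32(5, 13), 0);
    assert_eq!(ct_eq_u32(0, u32::MAX), 0);
}
```
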
-/// -/// This trait also provides generic implementations of conditional -/// assignment and conditional swaps. -pub trait ConditionallySelectable: Copy { - /// Select `a` or `b` according to `choice`. - /// - /// # Returns - /// - /// * `a` if `choice == Choice(0)`; - /// * `b` if `choice == Choice(1)`. - /// - /// This function should execute in constant time. - /// - /// # Example - /// - /// ``` - /// # extern crate subtle; - /// use subtle::ConditionallySelectable; - /// # - /// # fn main() { - /// let x: u8 = 13; - /// let y: u8 = 42; - /// - /// let z = u8::conditional_select(&x, &y, 0.into()); - /// assert_eq!(z, x); - /// let z = u8::conditional_select(&x, &y, 1.into()); - /// assert_eq!(z, y); - /// # } - /// ``` - #[inline] - fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self; - - /// Conditionally assign `other` to `self`, according to `choice`. - /// - /// This function should execute in constant time. - /// - /// # Example - /// - /// ``` - /// # extern crate subtle; - /// use subtle::ConditionallySelectable; - /// # - /// # fn main() { - /// let mut x: u8 = 13; - /// let mut y: u8 = 42; - /// - /// x.conditional_assign(&y, 0.into()); - /// assert_eq!(x, 13); - /// x.conditional_assign(&y, 1.into()); - /// assert_eq!(x, 42); - /// # } - /// ``` - #[inline] - fn conditional_assign(&mut self, other: &Self, choice: Choice) { - *self = Self::conditional_select(self, other, choice); - } - - /// Conditionally swap `self` and `other` if `choice == 1`; otherwise, - /// reassign both unto themselves. - /// - /// This function should execute in constant time. - /// - /// # Example - /// - /// ``` - /// # extern crate subtle; - /// use subtle::ConditionallySelectable; - /// # - /// # fn main() { - /// let mut x: u8 = 13; - /// let mut y: u8 = 42; - /// - /// u8::conditional_swap(&mut x, &mut y, 0.into()); - /// assert_eq!(x, 13); - /// assert_eq!(y, 42); - /// u8::conditional_swap(&mut x, &mut y, 1.into()); - /// assert_eq!(x, 42); - /// assert_eq!(y, 13); - /// # } - /// ``` - #[inline] - fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice) { - let t: Self = *a; - a.conditional_assign(&b, choice); - b.conditional_assign(&t, choice); - } -} - -macro_rules! to_signed_int { - (u8) => { - i8 - }; - (u16) => { - i16 - }; - (u32) => { - i32 - }; - (u64) => { - i64 - }; - (u128) => { - i128 - }; - (i8) => { - i8 - }; - (i16) => { - i16 - }; - (i32) => { - i32 - }; - (i64) => { - i64 - }; - (i128) => { - i128 - }; -} - -macro_rules! 
generate_integer_conditional_select { - ($($t:tt)*) => ($( - impl ConditionallySelectable for $t { - #[inline] - fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { - // if choice = 0, mask = (-0) = 0000...0000 - // if choice = 1, mask = (-1) = 1111...1111 - let mask = -(choice.unwrap_u8() as to_signed_int!($t)) as $t; - a ^ (mask & (a ^ b)) - } - - #[inline] - fn conditional_assign(&mut self, other: &Self, choice: Choice) { - // if choice = 0, mask = (-0) = 0000...0000 - // if choice = 1, mask = (-1) = 1111...1111 - let mask = -(choice.unwrap_u8() as to_signed_int!($t)) as $t; - *self ^= mask & (*self ^ *other); - } - - #[inline] - fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice) { - // if choice = 0, mask = (-0) = 0000...0000 - // if choice = 1, mask = (-1) = 1111...1111 - let mask = -(choice.unwrap_u8() as to_signed_int!($t)) as $t; - let t = mask & (*a ^ *b); - *a ^= t; - *b ^= t; - } - } - )*) -} - -generate_integer_conditional_select!( u8 i8); -generate_integer_conditional_select!( u16 i16); -generate_integer_conditional_select!( u32 i32); -generate_integer_conditional_select!( u64 i64); -#[cfg(feature = "i128")] -generate_integer_conditional_select!(u128 i128); - -impl ConditionallySelectable for Choice { - #[inline] - fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { - Choice(u8::conditional_select(&a.0, &b.0, choice)) - } -} - -/// A type which can be conditionally negated in constant time. -/// -/// # Note -/// -/// A generic implementation of `ConditionallyNegatable` is provided -/// for types `T` which are `ConditionallySelectable` and have `Neg` -/// implemented on `&T`. -pub trait ConditionallyNegatable { - /// Negate `self` if `choice == Choice(1)`; otherwise, leave it - /// unchanged. - /// - /// This function should execute in constant time. - #[inline] - fn conditional_negate(&mut self, choice: Choice); -} - -impl ConditionallyNegatable for T -where - T: ConditionallySelectable, - for<'a> &'a T: Neg, -{ - #[inline] - fn conditional_negate(&mut self, choice: Choice) { - // Need to cast to eliminate mutability - let self_neg: T = -(self as &T); - self.conditional_assign(&self_neg, choice); - } -} - -/// The `CtOption` type represents an optional value similar to the -/// [`Option`](core::option::Option) type but is intended for -/// use in constant time APIs. -/// -/// Any given `CtOption` is either `Some` or `None`, but unlike -/// `Option` these variants are not exposed. The -/// [`is_some()`](CtOption::is_some) method is used to determine if -/// the value is `Some`, and [`unwrap_or()`](CtOption::unwrap_or) and -/// [`unwrap_or_else()`](CtOption::unwrap_or_else) methods are -/// provided to access the underlying value. The value can also be -/// obtained with [`unwrap()`](CtOption::unwrap) but this will panic -/// if it is `None`. -/// -/// Functions that are intended to be constant time may not produce -/// valid results for all inputs, such as square root and inversion -/// operations in finite field arithmetic. Returning an `Option` -/// from these functions makes it difficult for the caller to reason -/// about the result in constant time, and returning an incorrect -/// value burdens the caller and increases the chance of bugs. -#[derive(Clone, Copy, Debug)] -pub struct CtOption { - value: T, - is_some: Choice, -} - -impl From> for Option { - /// Convert the `CtOption` wrapper into an `Option`, depending on whether - /// the underlying `is_some` `Choice` was a `0` or a `1` once unwrapped. 
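
The selection macro above builds conditional select, assign, and swap from a single sign-extension mask; a standalone sketch of that arithmetic for `u64`, again for illustration rather than as code from the patch:

```rust
/// Mirror of the mask trick in `generate_integer_conditional_select!`;
/// `choice` must be 0 or 1.
fn select_u64(a: u64, b: u64, choice: u8) -> u64 {
    // choice = 0 -> mask = 0x0000...0000, result = a
    // choice = 1 -> mask = 0xFFFF...FFFF, result = b
    let mask = (choice as i64).wrapping_neg() as u64;
    a ^ (mask & (a ^ b))
}

fn main() {
    assert_eq!(select_u64(13, 42, 0), 13);
    assert_eq!(select_u64(13, 42, 1), 42);
}
```
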
- /// - /// # Note - /// - /// This function exists to avoid ending up with ugly, verbose and/or bad handled - /// conversions from the `CtOption` wraps to an `Option` or `Result`. - /// This implementation doesn't intend to be constant-time nor try to protect the - /// leakage of the `T` since the `Option` will do it anyways. - fn from(source: CtOption) -> Option { - if source.is_some().unwrap_u8() == 1u8 { - Option::Some(source.value) - } else { - None - } - } -} - -impl CtOption { - /// This method is used to construct a new `CtOption` and takes - /// a value of type `T`, and a `Choice` that determines whether - /// the optional value should be `Some` or not. If `is_some` is - /// false, the value will still be stored but its value is never - /// exposed. - #[inline] - pub fn new(value: T, is_some: Choice) -> CtOption { - CtOption { - value: value, - is_some: is_some, - } - } - - /// This returns the underlying value but panics if it - /// is not `Some`. - #[inline] - pub fn unwrap(self) -> T { - assert_eq!(self.is_some.unwrap_u8(), 1); - - self.value - } - - /// This returns the underlying value if it is `Some` - /// or the provided value otherwise. - #[inline] - pub fn unwrap_or(self, def: T) -> T - where - T: ConditionallySelectable, - { - T::conditional_select(&def, &self.value, self.is_some) - } - - /// This returns the underlying value if it is `Some` - /// or the value produced by the provided closure otherwise. - #[inline] - pub fn unwrap_or_else(self, f: F) -> T - where - T: ConditionallySelectable, - F: FnOnce() -> T, - { - T::conditional_select(&f(), &self.value, self.is_some) - } - - /// Returns a true `Choice` if this value is `Some`. - #[inline] - pub fn is_some(&self) -> Choice { - self.is_some - } - - /// Returns a true `Choice` if this value is `None`. - #[inline] - pub fn is_none(&self) -> Choice { - !self.is_some - } - - /// Returns a `None` value if the option is `None`, otherwise - /// returns a `CtOption` enclosing the value of the provided closure. - /// The closure is given the enclosed value or, if the option is - /// `None`, it is provided a dummy value computed using - /// `Default::default()`. - /// - /// This operates in constant time, because the provided closure - /// is always called. - #[inline] - pub fn map(self, f: F) -> CtOption - where - T: Default + ConditionallySelectable, - F: FnOnce(T) -> U, - { - CtOption::new( - f(T::conditional_select( - &T::default(), - &self.value, - self.is_some, - )), - self.is_some, - ) - } - - /// Returns a `None` value if the option is `None`, otherwise - /// returns the result of the provided closure. The closure is - /// given the enclosed value or, if the option is `None`, it - /// is provided a dummy value computed using `Default::default()`. - /// - /// This operates in constant time, because the provided closure - /// is always called. - #[inline] - pub fn and_then(self, f: F) -> CtOption - where - T: Default + ConditionallySelectable, - F: FnOnce(T) -> CtOption, - { - let mut tmp = f(T::conditional_select( - &T::default(), - &self.value, - self.is_some, - )); - tmp.is_some &= self.is_some; - - tmp - } - - /// Returns `self` if it contains a value, and otherwise returns the result of - /// calling `f`. The provided function `f` is always called. 
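
To see the `CtOption` combinators documented above in one place, a small usage sketch; the `ct_checked_sub` helper is hypothetical and only stands in for some fallible constant-time operation.

```rust
use subtle::{ConstantTimeGreater, CtOption};

/// Hypothetical helper: subtraction that is only "Some" when it does not
/// underflow. The subtraction is always performed, so the shape of the
/// computation does not depend on the secret comparison.
fn ct_checked_sub(a: u64, b: u64) -> CtOption<u64> {
    let ok = !b.ct_gt(&a); // Choice(1) when b <= a
    CtOption::new(a.wrapping_sub(b), ok)
}

fn main() {
    let some = ct_checked_sub(10, 3).map(|v| v * 2); // Some, value 14
    let none = ct_checked_sub(3, 10).map(|v| v * 2); // None; closure ran on a dummy
    assert_eq!(some.unwrap_or(0), 14);
    assert_eq!(none.unwrap_or(0), 0);
    // Only at the API boundary do we fall back to a plain Option.
    assert_eq!(Option::<u64>::from(none), None);
}
```
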
- #[inline] - pub fn or_else(self, f: F) -> CtOption - where - T: ConditionallySelectable, - F: FnOnce() -> CtOption, - { - let is_none = self.is_none(); - let f = f(); - - Self::conditional_select(&self, &f, is_none) - } -} - -impl ConditionallySelectable for CtOption { - fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { - CtOption::new( - T::conditional_select(&a.value, &b.value, choice), - Choice::conditional_select(&a.is_some, &b.is_some, choice), - ) - } -} - -impl ConstantTimeEq for CtOption { - /// Two `CtOption`s are equal if they are both `Some` and - /// their values are equal, or both `None`. - #[inline] - fn ct_eq(&self, rhs: &CtOption) -> Choice { - let a = self.is_some(); - let b = rhs.is_some(); - - (a & b & self.value.ct_eq(&rhs.value)) | (!a & !b) - } -} - -/// A type which can be compared in some manner and be determined to be greater -/// than another of the same type. -pub trait ConstantTimeGreater { - /// Determine whether `self > other`. - /// - /// The bitwise-NOT of the return value of this function should be usable to - /// determine if `self <= other`. - /// - /// This function should execute in constant time. - /// - /// # Returns - /// - /// A `Choice` with a set bit if `self > other`, and with no set bits - /// otherwise. - /// - /// # Example - /// - /// ``` - /// # extern crate subtle; - /// use subtle::ConstantTimeGreater; - /// - /// let x: u8 = 13; - /// let y: u8 = 42; - /// - /// let x_gt_y = x.ct_gt(&y); - /// - /// assert_eq!(x_gt_y.unwrap_u8(), 0); - /// - /// let y_gt_x = y.ct_gt(&x); - /// - /// assert_eq!(y_gt_x.unwrap_u8(), 1); - /// - /// let x_gt_x = x.ct_gt(&x); - /// - /// assert_eq!(x_gt_x.unwrap_u8(), 0); - /// ``` - fn ct_gt(&self, other: &Self) -> Choice; -} - -macro_rules! generate_unsigned_integer_greater { - ($t_u: ty, $bit_width: expr) => { - impl ConstantTimeGreater for $t_u { - /// Returns Choice::from(1) iff x > y, and Choice::from(0) iff x <= y. - /// - /// # Note - /// - /// This algoritm would also work for signed integers if we first - /// flip the top bit, e.g. `let x: u8 = x ^ 0x80`, etc. - #[inline] - fn ct_gt(&self, other: &$t_u) -> Choice { - let gtb = self & !other; // All the bits in self that are greater than their corresponding bits in other. - let mut ltb = !self & other; // All the bits in self that are less than their corresponding bits in other. - let mut pow = 1; - - // Less-than operator is okay here because it's dependent on the bit-width. - while pow < $bit_width { - ltb |= ltb >> pow; // Bit-smear the highest set bit to the right. - pow += pow; - } - let mut bit = gtb & !ltb; // Select the highest set bit. - let mut pow = 1; - - while pow < $bit_width { - bit |= bit >> pow; // Shift it to the right until we end up with either 0 or 1. - pow += pow; - } - // XXX We should possibly do the above flattening to 0 or 1 in the - // Choice constructor rather than making it a debug error? - Choice::from((bit & 1) as u8) - } - } - } -} - -generate_unsigned_integer_greater!(u8, 8); -generate_unsigned_integer_greater!(u16, 16); -generate_unsigned_integer_greater!(u32, 32); -generate_unsigned_integer_greater!(u64, 64); -#[cfg(feature = "i128")] -generate_unsigned_integer_greater!(u128, 128); - -/// A type which can be compared in some manner and be determined to be less -/// than another of the same type. -pub trait ConstantTimeLess: ConstantTimeEq + ConstantTimeGreater { - /// Determine whether `self < other`. 
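
Combining the ordering traits above with `ConditionallySelectable` yields branch-free minimum selection; a sketch under that assumption, not part of the vendored sources:

```rust
use subtle::{ConditionallySelectable, ConstantTimeLess};

/// Branch-free minimum of two u32 values.
fn ct_min(a: u32, b: u32) -> u32 {
    // conditional_select(&a, &b, c) yields `a` for Choice(0) and `b` for
    // Choice(1), so select `b` exactly when `b < a`.
    u32::conditional_select(&a, &b, b.ct_lt(&a))
}

fn main() {
    assert_eq!(ct_min(13, 42), 13);
    assert_eq!(ct_min(42, 13), 13);
    assert_eq!(ct_min(7, 7), 7);
}
```
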
- /// - /// The bitwise-NOT of the return value of this function should be usable to - /// determine if `self >= other`. - /// - /// A default implementation is provided and implemented for the unsigned - /// integer types. - /// - /// This function should execute in constant time. - /// - /// # Returns - /// - /// A `Choice` with a set bit if `self < other`, and with no set bits - /// otherwise. - /// - /// # Example - /// - /// ``` - /// # extern crate subtle; - /// use subtle::ConstantTimeLess; - /// - /// let x: u8 = 13; - /// let y: u8 = 42; - /// - /// let x_lt_y = x.ct_lt(&y); - /// - /// assert_eq!(x_lt_y.unwrap_u8(), 1); - /// - /// let y_lt_x = y.ct_lt(&x); - /// - /// assert_eq!(y_lt_x.unwrap_u8(), 0); - /// - /// let x_lt_x = x.ct_lt(&x); - /// - /// assert_eq!(x_lt_x.unwrap_u8(), 0); - /// ``` - #[inline] - fn ct_lt(&self, other: &Self) -> Choice { - !self.ct_gt(other) & !self.ct_eq(other) - } -} - -impl ConstantTimeLess for u8 {} -impl ConstantTimeLess for u16 {} -impl ConstantTimeLess for u32 {} -impl ConstantTimeLess for u64 {} -#[cfg(feature = "i128")] -impl ConstantTimeLess for u128 {} diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/tests/mod.rs b/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/tests/mod.rs deleted file mode 100644 index eaa98a46037d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/tests/mod.rs +++ /dev/null @@ -1,389 +0,0 @@ -extern crate rand; -extern crate subtle; - -use rand::rngs::OsRng; -use rand::RngCore; - -use subtle::*; - -#[test] -#[should_panic] -fn slices_equal_different_lengths() { - let a: [u8; 3] = [0, 0, 0]; - let b: [u8; 4] = [0, 0, 0, 0]; - - assert_eq!((&a).ct_eq(&b).unwrap_u8(), 1); -} - -#[test] -fn slices_equal() { - let a: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u8; 8] = [1, 2, 3, 4, 4, 3, 2, 1]; - - let a_eq_a = (&a).ct_eq(&a); - let a_eq_b = (&a).ct_eq(&b); - - assert_eq!(a_eq_a.unwrap_u8(), 1); - assert_eq!(a_eq_b.unwrap_u8(), 0); - - let c: [u8; 16] = [0u8; 16]; - - let a_eq_c = (&a).ct_eq(&c); - assert_eq!(a_eq_c.unwrap_u8(), 0); -} - -#[test] -fn conditional_assign_i32() { - let mut a: i32 = 5; - let b: i32 = 13; - - a.conditional_assign(&b, 0.into()); - assert_eq!(a, 5); - a.conditional_assign(&b, 1.into()); - assert_eq!(a, 13); -} - -#[test] -fn conditional_assign_i64() { - let mut c: i64 = 2343249123; - let d: i64 = 8723884895; - - c.conditional_assign(&d, 0.into()); - assert_eq!(c, 2343249123); - c.conditional_assign(&d, 1.into()); - assert_eq!(c, 8723884895); -} - -macro_rules! 
generate_integer_conditional_select_tests { - ($($t:ty)*) => ($( - let x: $t = 0; // all 0 bits - let y: $t = !0; // all 1 bits - - assert_eq!(<$t>::conditional_select(&x, &y, 0.into()), 0); - assert_eq!(<$t>::conditional_select(&x, &y, 1.into()), y); - - let mut z = x; - let mut w = y; - - <$t>::conditional_swap(&mut z, &mut w, 0.into()); - assert_eq!(z, x); - assert_eq!(w, y); - <$t>::conditional_swap(&mut z, &mut w, 1.into()); - assert_eq!(z, y); - assert_eq!(w, x); - - z.conditional_assign(&x, 1.into()); - w.conditional_assign(&y, 0.into()); - assert_eq!(z, x); - assert_eq!(w, x); - )*) -} - -#[test] -fn integer_conditional_select() { - generate_integer_conditional_select_tests!(u8 u16 u32 u64); - generate_integer_conditional_select_tests!(i8 i16 i32 i64); - #[cfg(feature = "i128")] - generate_integer_conditional_select_tests!(i128 u128); -} - -#[test] -fn custom_conditional_select_i16() { - let x: i16 = 257; - let y: i16 = 514; - - assert_eq!(i16::conditional_select(&x, &y, 0.into()), 257); - assert_eq!(i16::conditional_select(&x, &y, 1.into()), 514); -} - -macro_rules! generate_integer_equal_tests { - ($($t:ty),*) => ($( - let y: $t = 0; // all 0 bits - let z: $t = !0; // all 1 bits - - let x = z; - - assert_eq!(x.ct_eq(&y).unwrap_u8(), 0); - assert_eq!(x.ct_eq(&z).unwrap_u8(), 1); - )*) -} - -#[test] -fn integer_equal() { - generate_integer_equal_tests!(u8, u16, u32, u64); - generate_integer_equal_tests!(i8, i16, i32, i64); - #[cfg(feature = "i128")] - generate_integer_equal_tests!(i128, u128); - generate_integer_equal_tests!(isize, usize); -} - -#[test] -fn choice_into_bool() { - let choice_true: bool = Choice::from(1).into(); - - assert!(choice_true); - - let choice_false: bool = Choice::from(0).into(); - - assert!(!choice_false); -} - -#[test] -fn conditional_select_choice() { - let t = Choice::from(1); - let f = Choice::from(0); - - assert_eq!(bool::from(Choice::conditional_select(&t, &f, f)), true); - assert_eq!(bool::from(Choice::conditional_select(&t, &f, t)), false); - assert_eq!(bool::from(Choice::conditional_select(&f, &t, f)), false); - assert_eq!(bool::from(Choice::conditional_select(&f, &t, t)), true); -} - -#[test] -fn choice_equal() { - assert!(Choice::from(0).ct_eq(&Choice::from(0)).unwrap_u8() == 1); - assert!(Choice::from(0).ct_eq(&Choice::from(1)).unwrap_u8() == 0); - assert!(Choice::from(1).ct_eq(&Choice::from(0)).unwrap_u8() == 0); - assert!(Choice::from(1).ct_eq(&Choice::from(1)).unwrap_u8() == 1); -} - -#[test] -fn test_ctoption() { - let a = CtOption::new(10, Choice::from(1)); - let b = CtOption::new(9, Choice::from(1)); - let c = CtOption::new(10, Choice::from(0)); - let d = CtOption::new(9, Choice::from(0)); - - // Test is_some / is_none - assert!(bool::from(a.is_some())); - assert!(bool::from(!a.is_none())); - assert!(bool::from(b.is_some())); - assert!(bool::from(!b.is_none())); - assert!(bool::from(!c.is_some())); - assert!(bool::from(c.is_none())); - assert!(bool::from(!d.is_some())); - assert!(bool::from(d.is_none())); - - // Test unwrap for Some - assert_eq!(a.unwrap(), 10); - assert_eq!(b.unwrap(), 9); - - // Test equality - assert!(bool::from(a.ct_eq(&a))); - assert!(bool::from(!a.ct_eq(&b))); - assert!(bool::from(!a.ct_eq(&c))); - assert!(bool::from(!a.ct_eq(&d))); - - // Test equality of None with different - // dummy value - assert!(bool::from(c.ct_eq(&d))); - - // Test unwrap_or - assert_eq!(CtOption::new(1, Choice::from(1)).unwrap_or(2), 1); - assert_eq!(CtOption::new(1, Choice::from(0)).unwrap_or(2), 2); - - // Test unwrap_or_else - 
assert_eq!(CtOption::new(1, Choice::from(1)).unwrap_or_else(|| 2), 1); - assert_eq!(CtOption::new(1, Choice::from(0)).unwrap_or_else(|| 2), 2); - - // Test map - assert_eq!( - CtOption::new(1, Choice::from(1)) - .map(|v| { - assert_eq!(v, 1); - 2 - }) - .unwrap(), - 2 - ); - assert_eq!( - CtOption::new(1, Choice::from(0)) - .map(|_| 2) - .is_none() - .unwrap_u8(), - 1 - ); - - // Test and_then - assert_eq!( - CtOption::new(1, Choice::from(1)) - .and_then(|v| { - assert_eq!(v, 1); - CtOption::new(2, Choice::from(0)) - }) - .is_none() - .unwrap_u8(), - 1 - ); - assert_eq!( - CtOption::new(1, Choice::from(1)) - .and_then(|v| { - assert_eq!(v, 1); - CtOption::new(2, Choice::from(1)) - }) - .unwrap(), - 2 - ); - - assert_eq!( - CtOption::new(1, Choice::from(0)) - .and_then(|_| CtOption::new(2, Choice::from(0))) - .is_none() - .unwrap_u8(), - 1 - ); - assert_eq!( - CtOption::new(1, Choice::from(0)) - .and_then(|_| CtOption::new(2, Choice::from(1))) - .is_none() - .unwrap_u8(), - 1 - ); - - // Test or_else - assert_eq!( - CtOption::new(1, Choice::from(0)) - .or_else(|| CtOption::new(2, Choice::from(1))) - .unwrap(), - 2 - ); - assert_eq!( - CtOption::new(1, Choice::from(1)) - .or_else(|| CtOption::new(2, Choice::from(0))) - .unwrap(), - 1 - ); - assert_eq!( - CtOption::new(1, Choice::from(1)) - .or_else(|| CtOption::new(2, Choice::from(1))) - .unwrap(), - 1 - ); - assert!(bool::from( - CtOption::new(1, Choice::from(0)) - .or_else(|| CtOption::new(2, Choice::from(0))) - .is_none() - )); - - // Test (in)equality - assert!(CtOption::new(1, Choice::from(0)).ct_eq(&CtOption::new(1, Choice::from(1))).unwrap_u8() == 0); - assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(1, Choice::from(0))).unwrap_u8() == 0); - assert!(CtOption::new(1, Choice::from(0)).ct_eq(&CtOption::new(2, Choice::from(1))).unwrap_u8() == 0); - assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(2, Choice::from(0))).unwrap_u8() == 0); - assert!(CtOption::new(1, Choice::from(0)).ct_eq(&CtOption::new(1, Choice::from(0))).unwrap_u8() == 1); - assert!(CtOption::new(1, Choice::from(0)).ct_eq(&CtOption::new(2, Choice::from(0))).unwrap_u8() == 1); - assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(2, Choice::from(1))).unwrap_u8() == 0); - assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(2, Choice::from(1))).unwrap_u8() == 0); - assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(1, Choice::from(1))).unwrap_u8() == 1); - assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(1, Choice::from(1))).unwrap_u8() == 1); -} - -#[test] -#[should_panic] -fn unwrap_none_ctoption() { - // This test might fail (in release mode?) if the - // compiler decides to optimize it away. - CtOption::new(10, Choice::from(0)).unwrap(); -} - -macro_rules! 
generate_greater_than_test { - ($ty: ty) => { - for _ in 0..100 { - let x = OsRng.next_u64() as $ty; - let y = OsRng.next_u64() as $ty; - let z = x.ct_gt(&y); - - println!("x={}, y={}, z={:?}", x, y, z); - - if x < y { - assert!(z.unwrap_u8() == 0); - } else if x == y { - assert!(z.unwrap_u8() == 0); - } else if x > y { - assert!(z.unwrap_u8() == 1); - } - } - } -} - -#[test] -fn greater_than_u8() { - generate_greater_than_test!(u8); -} - -#[test] -fn greater_than_u16() { - generate_greater_than_test!(u16); -} - -#[test] -fn greater_than_u32() { - generate_greater_than_test!(u32); -} - -#[test] -fn greater_than_u64() { - generate_greater_than_test!(u64); -} - -#[cfg(feature = "i128")] -#[test] -fn greater_than_u128() { - generate_greater_than_test!(u128); -} - -#[test] -/// Test that the two's compliment min and max, i.e. 0000...0001 < 1111...1110, -/// gives the correct result. (This fails using the bit-twiddling algorithm that -/// go/crypto/subtle uses.) -fn less_than_twos_compliment_minmax() { - let z = 1u32.ct_lt(&(2u32.pow(31)-1)); - - assert!(z.unwrap_u8() == 1); -} - -macro_rules! generate_less_than_test { - ($ty: ty) => { - for _ in 0..100 { - let x = OsRng.next_u64() as $ty; - let y = OsRng.next_u64() as $ty; - let z = x.ct_gt(&y); - - println!("x={}, y={}, z={:?}", x, y, z); - - if x < y { - assert!(z.unwrap_u8() == 0); - } else if x == y { - assert!(z.unwrap_u8() == 0); - } else if x > y { - assert!(z.unwrap_u8() == 1); - } - } - } -} - -#[test] -fn less_than_u8() { - generate_less_than_test!(u8); -} - -#[test] -fn less_than_u16() { - generate_less_than_test!(u16); -} - -#[test] -fn less_than_u32() { - generate_less_than_test!(u32); -} - -#[test] -fn less_than_u64() { - generate_less_than_test!(u64); -} - -#[cfg(feature = "i128")] -#[test] -fn less_than_u128() { - generate_less_than_test!(u128); -} diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..5265798e7a9c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "5457b5448b021d1da101ababbb854e6657233943" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/.github/workflows/test.yml b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/.github/workflows/test.yml new file mode 100644 index 000000000000..7c1616a301ae --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/.github/workflows/test.yml @@ -0,0 +1,64 @@ +on: [push, pull_request] + +name: Test + +jobs: + test: + name: cargo test + runs-on: ubuntu-latest + strategy: + matrix: + rust: + - stable + - beta + - nightly + - 1.60.0 + steps: + - name: checkout + uses: actions/checkout@v2 + - name: toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + target: thumbv7em-none-eabi + override: true + - name: test + uses: actions-rs/cargo@v1 + with: + command: test + - name: nightly + uses: actions-rs/cargo@v1 + with: + command: test + args: 
--features nightly + - name: no-default-features + uses: actions-rs/cargo@v1 + with: + command: test + args: --no-default-features + - name: std + uses: actions-rs/cargo@v1 + with: + command: test + args: --no-default-features --features std + - name: std const-generics + uses: actions-rs/cargo@v1 + with: + command: test + args: --no-default-features --features "std const-generics" + - name: std i128 + uses: actions-rs/cargo@v1 + with: + command: test + args: --no-default-features --features "std i128" + - name: std i128 const-generics + uses: actions-rs/cargo@v1 + with: + command: test + args: --no-default-features --features "std i128 const-generics" + - name: no std build + uses: actions-rs/cargo@v1 + with: + command: build + args: --no-default-features --target thumbv7em-none-eabi diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/CHANGELOG.md b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/CHANGELOG.md new file mode 100644 index 000000000000..e6a38636531f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/CHANGELOG.md @@ -0,0 +1,74 @@ +# Changelog + +Entries are listed in reverse chronological order. + +## 2.5.0 + +* Add constant-timedness note to the documentation for `CtOption::unwrap_or_else`. +* Add `CtOption::expect`. +* Add `ConstantTimeEq::ct_ne` with default implementation. +* Add new `core_hint_black_box` feature from Diane Hosfelt and Amber + Sprenkels which utilises the original `black_box` functionality from + when subtle was first written, which has now found it's way into the + Rust standard library. +* Add new `const-generics` feature from @survived which adds support + for subtle traits for generic arrays `[T; N]`. +* Add new feature for supporting `core::cmp::Ordering` for types which + implement subtle traits, patch from @tarcieri. +* Update `rand` dependency to 0.8. + +## 2.4.1 + +* Fix a bug in how the README was included in the documentation builds + which caused nightly builds to break. + +## 2.4.0 + +* Add new `ConstantTimeGreater` and `ConstantTimeLess` traits, as well + as implementations for unsigned integers, by @isislovecruft. + +## 2.3.0 + +* Add `impl ConstantTimeEq for Choice` by @tarcieri. +* Add `impl From> for Option` by @CPerezz. This is useful for + handling library code that produces `CtOption`s in contexts where timing + doesn't matter. +* Introduce an MSRV policy. + +## 2.2.3 + +* Remove the `nightly`-only asm-based `black_box` barrier in favor of the + volatile-based one, fixing compilation on current nightlies. + +## 2.2.2 + +* Update README.md to clarify that 2.2 and above do not require the `nightly` + feature. + +## 2.2.1 + +* Adds an `or_else` combinator for `CtOption`, by @ebfull. +* Optimized `black_box` for `nightly`, by @jethrogb. +* Optimized `black_box` for `stable`, by @dsprenkels. +* Fixed CI for `no_std`, by @dsprenkels. +* Fixed fuzz target compilation, by @3for. + +## 2.2.0 + +* Error during `cargo publish`, yanked. + +## 2.1.1 + +* Adds the "crypto" tag to crate metadata. +* New shorter, more efficient ct_eq() for integers, contributed by Thomas Pornin. + +## 2.1.0 + +* Adds a new `CtOption` which acts as a constant-time `Option` + (thanks to @ebfull for the implementation). +* `Choice` now itself implements `ConditionallySelectable`. + +## 2.0.0 + +* Stable version with traits reworked from 1.0.0 to interact better + with the orphan rules. 
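
Two of the additions listed in the changelog above for 2.5.0, `ConstantTimeEq::ct_ne` and `CtOption::expect`, can be exercised as in this short sketch (illustrative only):

```rust
use subtle::{Choice, ConstantTimeEq, CtOption};

fn main() {
    let x: u8 = 5;
    let y: u8 = 13;

    // `ct_ne` is the constant-time counterpart of `!=`, with a default
    // implementation in terms of `ct_eq`.
    assert_eq!(x.ct_ne(&y).unwrap_u8(), 1);
    assert_eq!(x.ct_ne(&x).unwrap_u8(), 0);

    // `expect` behaves like `unwrap` but with a caller-supplied panic message.
    let v = CtOption::new(42u8, Choice::from(1)).expect("value should be present");
    assert_eq!(v, 42);
}
```
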
diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/CONTRIBUTING.md b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/CONTRIBUTING.md similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/subtle-2.4.1/CONTRIBUTING.md rename to third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/CONTRIBUTING.md diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/Cargo.toml new file mode 100644 index 000000000000..bf78f6f60645 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/Cargo.toml @@ -0,0 +1,70 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "subtle" +version = "2.6.1" +authors = [ + "Isis Lovecruft ", + "Henry de Valence ", +] +build = false +exclude = [ + "**/.gitignore", + ".travis.yml", +] +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Pure-Rust traits and utilities for constant-time cryptographic implementations." +homepage = "https://dalek.rs/" +documentation = "https://docs.rs/subtle" +readme = "README.md" +keywords = [ + "cryptography", + "crypto", + "constant-time", + "utilities", +] +categories = [ + "cryptography", + "no-std", +] +license = "BSD-3-Clause" +repository = "https://github.com/dalek-cryptography/subtle" + +[lib] +name = "subtle" +path = "src/lib.rs" + +[[test]] +name = "mod" +path = "tests/mod.rs" + +[dev-dependencies.rand] +version = "0.8" + +[features] +const-generics = [] +core_hint_black_box = [] +default = [ + "std", + "i128", +] +i128 = [] +nightly = [] +std = [] + +[badges.travis-ci] +branch = "main" +repository = "dalek-cryptography/subtle" diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/Cargo.toml.orig new file mode 100644 index 000000000000..0a313eebf10c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/Cargo.toml.orig @@ -0,0 +1,39 @@ +[package] +name = "subtle" +# Before incrementing: +# - update CHANGELOG +# - update html_root_url +# - update README if necessary by semver +# - if any updates were made to the README, also update the module documentation in src/lib.rs +version = "2.6.1" +edition = "2018" +authors = ["Isis Lovecruft ", + "Henry de Valence "] +readme = "README.md" +license = "BSD-3-Clause" +repository = "https://github.com/dalek-cryptography/subtle" +homepage = "https://dalek.rs/" +documentation = "https://docs.rs/subtle" +categories = ["cryptography", "no-std"] +keywords = ["cryptography", "crypto", "constant-time", "utilities"] +description = "Pure-Rust traits and utilities for constant-time cryptographic implementations." +exclude = [ + "**/.gitignore", + ".travis.yml", +] + +[badges] +travis-ci = { repository = "dalek-cryptography/subtle", branch = "main"} + +[dev-dependencies] +rand = { version = "0.8" } + +[features] +const-generics = [] +# DEPRECATED: As of 2.5.1, this feature does nothing. 
+core_hint_black_box = [] +default = ["std", "i128"] +std = [] +i128 = [] +# DEPRECATED: As of 2.4.1, this feature does nothing. +nightly = [] diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/LICENSE b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/LICENSE new file mode 100644 index 000000000000..9e2751f8886f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2016-2017 Isis Agora Lovecruft, Henry de Valence. All rights reserved. +Copyright (c) 2016-2024 Isis Agora Lovecruft. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/README.md b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/README.md new file mode 100644 index 000000000000..a9eb2610a76e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/README.md @@ -0,0 +1,74 @@ +# subtle [![](https://img.shields.io/crates/v/subtle.svg)](https://crates.io/crates/subtle) [![](https://img.shields.io/badge/dynamic/json.svg?label=docs&uri=https%3A%2F%2Fcrates.io%2Fapi%2Fv1%2Fcrates%2Fsubtle%2Fversions&query=%24.versions%5B0%5D.num&colorB=4F74A6)](https://doc.dalek.rs/subtle) [![](https://travis-ci.org/dalek-cryptography/subtle.svg?branch=master)](https://travis-ci.org/dalek-cryptography/subtle) + +**Pure-Rust traits and utilities for constant-time cryptographic implementations.** + +It consists of a `Choice` type, and a collection of traits using `Choice` +instead of `bool` which are intended to execute in constant-time. The `Choice` +type is a wrapper around a `u8` that holds a `0` or `1`. + +```toml +subtle = "2.6" +``` + +This crate represents a “best-effort” attempt, since side-channels +are ultimately a property of a deployed cryptographic system +including the hardware it runs on, not just of software. 
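
The feature list above includes `const-generics` for fixed-size arrays; even without that feature, `ct_eq` already accepts arrays through slice coercion, as this sketch shows:

```rust
use subtle::ConstantTimeEq;

fn main() {
    let a: [u8; 16] = [0x42; 16];
    let mut b = a;

    assert_eq!(a.ct_eq(&b).unwrap_u8(), 1);

    // Flip one byte: the comparison still walks the whole array,
    // but the result becomes "not equal".
    b[7] ^= 0x01;
    assert_eq!(a.ct_eq(&b).unwrap_u8(), 0);
}
```
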
+ +The traits are implemented using bitwise operations, and should execute in +constant time provided that a) the bitwise operations are constant-time and +b) the bitwise operations are not recognized as a conditional assignment and +optimized back into a branch. + +For a compiler to recognize that bitwise operations represent a conditional +assignment, it needs to know that the value used to generate the bitmasks is +really a boolean `i1` rather than an `i8` byte value. In an attempt to +prevent this refinement, the crate tries to hide the value of a `Choice`'s +inner `u8` by passing it through a volatile read. For more information, see +the _About_ section below. + +Rust versions from 1.51 or higher have const generics support. You may enable +`const-generics` feautre to have `subtle` traits implemented for arrays `[T; N]`. + +Versions prior to `2.2` recommended use of the `nightly` feature to enable an +optimization barrier; this is not required in versions `2.2` and above. + +Note: the `subtle` crate contains `debug_assert`s to check invariants during +debug builds. These invariant checks involve secret-dependent branches, and +are not present when compiled in release mode. This crate is intended to be +used in release mode. + +## Documentation + +Documentation is available [here][docs]. + +## Minimum Supported Rust Version + +Rust **1.41** or higher. + +Minimum supported Rust version can be changed in the future, but it will be done with a minor version bump. + +## About + +This library aims to be the Rust equivalent of Go’s `crypto/subtle` module. + +Old versions of the optimization barrier in `impl From for Choice` were +based on Tim Maclean's [work on `rust-timing-shield`][rust-timing-shield], +which attempts to provide a more comprehensive approach for preventing +software side-channels in Rust code. +From version `2.2`, it was based on Diane Hosfelt and Amber Sprenkels' work on +"Secret Types in Rust". + +`subtle` is authored by isis agora lovecruft and Henry de Valence. + +## Warning + +This code is a low-level library, intended for specific use-cases implementing +cryptographic protocols. It represents a best-effort attempt to protect +against some software side-channels. Because side-channel resistance is not a +property of software alone, but of software together with hardware, any such +effort is fundamentally limited. + +**USE AT YOUR OWN RISK** + +[docs]: https://docs.rs/subtle +[rust-timing-shield]: https://www.chosenplaintext.ca/open-source/rust-timing-shield/security diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/src/lib.rs new file mode 100644 index 000000000000..9fc143b2fe04 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/src/lib.rs @@ -0,0 +1,1008 @@ +// -*- mode: rust; -*- +// +// This file is part of subtle, part of the dalek cryptography project. +// Copyright (c) 2016-2018 isis lovecruft, Henry de Valence +// See LICENSE for licensing information. +// +// Authors: +// - isis agora lovecruft +// - Henry de Valence + +#![no_std] +#![deny(missing_docs)] +#![doc(html_logo_url = "https://doc.dalek.rs/assets/dalek-logo-clear.png")] +#![doc(html_root_url = "https://docs.rs/subtle/2.6.0")] + +//! 
# subtle [![](https://img.shields.io/crates/v/subtle.svg)](https://crates.io/crates/subtle) [![](https://img.shields.io/badge/dynamic/json.svg?label=docs&uri=https%3A%2F%2Fcrates.io%2Fapi%2Fv1%2Fcrates%2Fsubtle%2Fversions&query=%24.versions%5B0%5D.num&colorB=4F74A6)](https://doc.dalek.rs/subtle) [![](https://travis-ci.org/dalek-cryptography/subtle.svg?branch=master)](https://travis-ci.org/dalek-cryptography/subtle) +//! +//! **Pure-Rust traits and utilities for constant-time cryptographic implementations.** +//! +//! It consists of a `Choice` type, and a collection of traits using `Choice` +//! instead of `bool` which are intended to execute in constant-time. The `Choice` +//! type is a wrapper around a `u8` that holds a `0` or `1`. +//! +//! ```toml +//! subtle = "2.6" +//! ``` +//! +//! This crate represents a “best-effort” attempt, since side-channels +//! are ultimately a property of a deployed cryptographic system +//! including the hardware it runs on, not just of software. +//! +//! The traits are implemented using bitwise operations, and should execute in +//! constant time provided that a) the bitwise operations are constant-time and +//! b) the bitwise operations are not recognized as a conditional assignment and +//! optimized back into a branch. +//! +//! For a compiler to recognize that bitwise operations represent a conditional +//! assignment, it needs to know that the value used to generate the bitmasks is +//! really a boolean `i1` rather than an `i8` byte value. In an attempt to +//! prevent this refinement, the crate tries to hide the value of a `Choice`'s +//! inner `u8` by passing it through a volatile read. For more information, see +//! the _About_ section below. +//! +//! Rust versions from 1.51 or higher have const generics support. You may enable +//! `const-generics` feautre to have `subtle` traits implemented for arrays `[T; N]`. +//! +//! Versions prior to `2.2` recommended use of the `nightly` feature to enable an +//! optimization barrier; this is not required in versions `2.2` and above. +//! +//! Note: the `subtle` crate contains `debug_assert`s to check invariants during +//! debug builds. These invariant checks involve secret-dependent branches, and +//! are not present when compiled in release mode. This crate is intended to be +//! used in release mode. +//! +//! ## Documentation +//! +//! Documentation is available [here][docs]. +//! +//! ## Minimum Supported Rust Version +//! +//! Rust **1.41** or higher. +//! +//! Minimum supported Rust version can be changed in the future, but it will be done with a minor version bump. +//! +//! ## About +//! +//! This library aims to be the Rust equivalent of Go’s `crypto/subtle` module. +//! +//! Old versions of the optimization barrier in `impl From for Choice` were +//! based on Tim Maclean's [work on `rust-timing-shield`][rust-timing-shield], +//! which attempts to provide a more comprehensive approach for preventing +//! software side-channels in Rust code. +//! From version `2.2`, it was based on Diane Hosfelt and Amber Sprenkels' work on +//! "Secret Types in Rust". +//! +//! `subtle` is authored by isis agora lovecruft and Henry de Valence. +//! +//! ## Warning +//! +//! This code is a low-level library, intended for specific use-cases implementing +//! cryptographic protocols. It represents a best-effort attempt to protect +//! against some software side-channels. Because side-channel resistance is not a +//! property of software alone, but of software together with hardware, any such +//! 
effort is fundamentally limited. +//! +//! **USE AT YOUR OWN RISK** +//! +//! [docs]: https://docs.rs/subtle +//! [rust-timing-shield]: https://www.chosenplaintext.ca/open-source/rust-timing-shield/security + +#[cfg(feature = "std")] +#[macro_use] +extern crate std; + +use core::cmp; +use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Neg, Not}; +use core::option::Option; + +#[cfg(feature = "core_hint_black_box")] +use core::hint::black_box; + +/// The `Choice` struct represents a choice for use in conditional assignment. +/// +/// It is a wrapper around a `u8`, which should have the value either `1` (true) +/// or `0` (false). +/// +/// The conversion from `u8` to `Choice` passes the value through an optimization +/// barrier, as a best-effort attempt to prevent the compiler from inferring that +/// the `Choice` value is a boolean. This strategy is based on Tim Maclean's +/// [work on `rust-timing-shield`][rust-timing-shield], which attempts to provide +/// a more comprehensive approach for preventing software side-channels in Rust +/// code. +/// +/// The `Choice` struct implements operators for AND, OR, XOR, and NOT, to allow +/// combining `Choice` values. These operations do not short-circuit. +/// +/// [rust-timing-shield]: +/// https://www.chosenplaintext.ca/open-source/rust-timing-shield/security +#[derive(Copy, Clone, Debug)] +pub struct Choice(u8); + +impl Choice { + /// Unwrap the `Choice` wrapper to reveal the underlying `u8`. + /// + /// # Note + /// + /// This function only exists as an **escape hatch** for the rare case + /// where it's not possible to use one of the `subtle`-provided + /// trait impls. + /// + /// **To convert a `Choice` to a `bool`, use the `From` implementation instead.** + #[inline] + pub fn unwrap_u8(&self) -> u8 { + self.0 + } +} + +impl From for bool { + /// Convert the `Choice` wrapper into a `bool`, depending on whether + /// the underlying `u8` was a `0` or a `1`. + /// + /// # Note + /// + /// This function exists to avoid having higher-level cryptographic protocol + /// implementations duplicating this pattern. + /// + /// The intended use case for this conversion is at the _end_ of a + /// higher-level primitive implementation: for example, in checking a keyed + /// MAC, where the verification should happen in constant-time (and thus use + /// a `Choice`) but it is safe to return a `bool` at the end of the + /// verification. 
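
The conversion documented above is intended for the end of a higher-level check, such as verifying a keyed MAC; a sketch of that pattern, with the `verify_tag` helper being illustrative rather than part of the crate:

```rust
use subtle::ConstantTimeEq;

/// Compare a computed MAC tag against the one received on the wire.
/// All bytes are examined regardless of where they differ; only the
/// final result is turned into a `bool` for the caller.
fn verify_tag(expected: &[u8], received: &[u8]) -> bool {
    // `ct_eq` on slices short-circuits only on *length*, which is public.
    expected.ct_eq(received).into()
}

fn main() {
    let tag = [0xAAu8; 32];
    let mut forged = tag;
    forged[31] ^= 1;

    assert!(verify_tag(&tag, &tag));
    assert!(!verify_tag(&tag, &forged));
}
```
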
+ #[inline] + fn from(source: Choice) -> bool { + debug_assert!((source.0 == 0u8) | (source.0 == 1u8)); + source.0 != 0 + } +} + +impl BitAnd for Choice { + type Output = Choice; + #[inline] + fn bitand(self, rhs: Choice) -> Choice { + (self.0 & rhs.0).into() + } +} + +impl BitAndAssign for Choice { + #[inline] + fn bitand_assign(&mut self, rhs: Choice) { + *self = *self & rhs; + } +} + +impl BitOr for Choice { + type Output = Choice; + #[inline] + fn bitor(self, rhs: Choice) -> Choice { + (self.0 | rhs.0).into() + } +} + +impl BitOrAssign for Choice { + #[inline] + fn bitor_assign(&mut self, rhs: Choice) { + *self = *self | rhs; + } +} + +impl BitXor for Choice { + type Output = Choice; + #[inline] + fn bitxor(self, rhs: Choice) -> Choice { + (self.0 ^ rhs.0).into() + } +} + +impl BitXorAssign for Choice { + #[inline] + fn bitxor_assign(&mut self, rhs: Choice) { + *self = *self ^ rhs; + } +} + +impl Not for Choice { + type Output = Choice; + #[inline] + fn not(self) -> Choice { + (1u8 & (!self.0)).into() + } +} + +/// This function is a best-effort attempt to prevent the compiler from knowing +/// anything about the value of the returned `u8`, other than its type. +/// +/// Because we want to support stable Rust, we don't have access to inline +/// assembly or test::black_box, so we use the fact that volatile values will +/// never be elided to register values. +/// +/// Note: Rust's notion of "volatile" is subject to change over time. While this +/// code may break in a non-destructive way in the future, “constant-time” code +/// is a continually moving target, and this is better than doing nothing. +#[cfg(not(feature = "core_hint_black_box"))] +#[inline(never)] +fn black_box(input: T) -> T { + unsafe { + // Optimization barrier + // + // SAFETY: + // - &input is not NULL because we own input; + // - input is Copy and always live; + // - input is always properly aligned. + core::ptr::read_volatile(&input) + } +} + +impl From for Choice { + #[inline] + fn from(input: u8) -> Choice { + debug_assert!((input == 0u8) | (input == 1u8)); + + // Our goal is to prevent the compiler from inferring that the value held inside the + // resulting `Choice` struct is really a `bool` instead of a `u8`. + Choice(black_box(input)) + } +} + +/// An `Eq`-like trait that produces a `Choice` instead of a `bool`. +/// +/// # Example +/// +/// ``` +/// use subtle::ConstantTimeEq; +/// let x: u8 = 5; +/// let y: u8 = 13; +/// +/// assert_eq!(x.ct_eq(&y).unwrap_u8(), 0); +/// assert_eq!(x.ct_eq(&x).unwrap_u8(), 1); +/// ``` +// +// #[inline] is specified on these function prototypes to signify that they +#[allow(unused_attributes)] // should be in the actual implementation +pub trait ConstantTimeEq { + /// Determine if two items are equal. + /// + /// The `ct_eq` function should execute in constant time. + /// + /// # Returns + /// + /// * `Choice(1u8)` if `self == other`; + /// * `Choice(0u8)` if `self != other`. + #[inline] + #[allow(unused_attributes)] + fn ct_eq(&self, other: &Self) -> Choice; + + /// Determine if two items are NOT equal. + /// + /// The `ct_ne` function should execute in constant time. + /// + /// # Returns + /// + /// * `Choice(0u8)` if `self == other`; + /// * `Choice(1u8)` if `self != other`. + #[inline] + fn ct_ne(&self, other: &Self) -> Choice { + !self.ct_eq(other) + } +} + +impl ConstantTimeEq for [T] { + /// Check whether two slices of `ConstantTimeEq` types are equal. + /// + /// # Note + /// + /// This function short-circuits if the lengths of the input slices + /// are different. 
Otherwise, it should execute in time independent + /// of the slice contents. + /// + /// Since arrays coerce to slices, this function works with fixed-size arrays: + /// + /// ``` + /// # use subtle::ConstantTimeEq; + /// # + /// let a: [u8; 8] = [0,1,2,3,4,5,6,7]; + /// let b: [u8; 8] = [0,1,2,3,0,1,2,3]; + /// + /// let a_eq_a = a.ct_eq(&a); + /// let a_eq_b = a.ct_eq(&b); + /// + /// assert_eq!(a_eq_a.unwrap_u8(), 1); + /// assert_eq!(a_eq_b.unwrap_u8(), 0); + /// ``` + #[inline] + fn ct_eq(&self, _rhs: &[T]) -> Choice { + let len = self.len(); + + // Short-circuit on the *lengths* of the slices, not their + // contents. + if len != _rhs.len() { + return Choice::from(0); + } + + // This loop shouldn't be shortcircuitable, since the compiler + // shouldn't be able to reason about the value of the `u8` + // unwrapped from the `ct_eq` result. + let mut x = 1u8; + for (ai, bi) in self.iter().zip(_rhs.iter()) { + x &= ai.ct_eq(bi).unwrap_u8(); + } + + x.into() + } +} + +impl ConstantTimeEq for Choice { + #[inline] + fn ct_eq(&self, rhs: &Choice) -> Choice { + !(*self ^ *rhs) + } +} + +/// Given the bit-width `$bit_width` and the corresponding primitive +/// unsigned and signed types `$t_u` and `$t_i` respectively, generate +/// an `ConstantTimeEq` implementation. +macro_rules! generate_integer_equal { + ($t_u:ty, $t_i:ty, $bit_width:expr) => { + impl ConstantTimeEq for $t_u { + #[inline] + fn ct_eq(&self, other: &$t_u) -> Choice { + // x == 0 if and only if self == other + let x: $t_u = self ^ other; + + // If x == 0, then x and -x are both equal to zero; + // otherwise, one or both will have its high bit set. + let y: $t_u = (x | x.wrapping_neg()) >> ($bit_width - 1); + + // Result is the opposite of the high bit (now shifted to low). + ((y ^ (1 as $t_u)) as u8).into() + } + } + impl ConstantTimeEq for $t_i { + #[inline] + fn ct_eq(&self, other: &$t_i) -> Choice { + // Bitcast to unsigned and call that implementation. + (*self as $t_u).ct_eq(&(*other as $t_u)) + } + } + }; +} + +generate_integer_equal!(u8, i8, 8); +generate_integer_equal!(u16, i16, 16); +generate_integer_equal!(u32, i32, 32); +generate_integer_equal!(u64, i64, 64); +#[cfg(feature = "i128")] +generate_integer_equal!(u128, i128, 128); +generate_integer_equal!(usize, isize, ::core::mem::size_of::() * 8); + +/// `Ordering` is `#[repr(i8)]` making it possible to leverage `i8::ct_eq`. +impl ConstantTimeEq for cmp::Ordering { + #[inline] + fn ct_eq(&self, other: &Self) -> Choice { + (*self as i8).ct_eq(&(*other as i8)) + } +} + +/// A type which can be conditionally selected in constant time. +/// +/// This trait also provides generic implementations of conditional +/// assignment and conditional swaps. +// +// #[inline] is specified on these function prototypes to signify that they +#[allow(unused_attributes)] // should be in the actual implementation +pub trait ConditionallySelectable: Copy { + /// Select `a` or `b` according to `choice`. + /// + /// # Returns + /// + /// * `a` if `choice == Choice(0)`; + /// * `b` if `choice == Choice(1)`. + /// + /// This function should execute in constant time. 
+ /// + /// # Example + /// + /// ``` + /// use subtle::ConditionallySelectable; + /// # + /// # fn main() { + /// let x: u8 = 13; + /// let y: u8 = 42; + /// + /// let z = u8::conditional_select(&x, &y, 0.into()); + /// assert_eq!(z, x); + /// let z = u8::conditional_select(&x, &y, 1.into()); + /// assert_eq!(z, y); + /// # } + /// ``` + #[inline] + #[allow(unused_attributes)] + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self; + + /// Conditionally assign `other` to `self`, according to `choice`. + /// + /// This function should execute in constant time. + /// + /// # Example + /// + /// ``` + /// use subtle::ConditionallySelectable; + /// # + /// # fn main() { + /// let mut x: u8 = 13; + /// let mut y: u8 = 42; + /// + /// x.conditional_assign(&y, 0.into()); + /// assert_eq!(x, 13); + /// x.conditional_assign(&y, 1.into()); + /// assert_eq!(x, 42); + /// # } + /// ``` + #[inline] + fn conditional_assign(&mut self, other: &Self, choice: Choice) { + *self = Self::conditional_select(self, other, choice); + } + + /// Conditionally swap `self` and `other` if `choice == 1`; otherwise, + /// reassign both unto themselves. + /// + /// This function should execute in constant time. + /// + /// # Example + /// + /// ``` + /// use subtle::ConditionallySelectable; + /// # + /// # fn main() { + /// let mut x: u8 = 13; + /// let mut y: u8 = 42; + /// + /// u8::conditional_swap(&mut x, &mut y, 0.into()); + /// assert_eq!(x, 13); + /// assert_eq!(y, 42); + /// u8::conditional_swap(&mut x, &mut y, 1.into()); + /// assert_eq!(x, 42); + /// assert_eq!(y, 13); + /// # } + /// ``` + #[inline] + fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice) { + let t: Self = *a; + a.conditional_assign(&b, choice); + b.conditional_assign(&t, choice); + } +} + +macro_rules! to_signed_int { + (u8) => { + i8 + }; + (u16) => { + i16 + }; + (u32) => { + i32 + }; + (u64) => { + i64 + }; + (u128) => { + i128 + }; + (i8) => { + i8 + }; + (i16) => { + i16 + }; + (i32) => { + i32 + }; + (i64) => { + i64 + }; + (i128) => { + i128 + }; +} + +macro_rules! generate_integer_conditional_select { + ($($t:tt)*) => ($( + impl ConditionallySelectable for $t { + #[inline] + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + // if choice = 0, mask = (-0) = 0000...0000 + // if choice = 1, mask = (-1) = 1111...1111 + let mask = -(choice.unwrap_u8() as to_signed_int!($t)) as $t; + a ^ (mask & (a ^ b)) + } + + #[inline] + fn conditional_assign(&mut self, other: &Self, choice: Choice) { + // if choice = 0, mask = (-0) = 0000...0000 + // if choice = 1, mask = (-1) = 1111...1111 + let mask = -(choice.unwrap_u8() as to_signed_int!($t)) as $t; + *self ^= mask & (*self ^ *other); + } + + #[inline] + fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice) { + // if choice = 0, mask = (-0) = 0000...0000 + // if choice = 1, mask = (-1) = 1111...1111 + let mask = -(choice.unwrap_u8() as to_signed_int!($t)) as $t; + let t = mask & (*a ^ *b); + *a ^= t; + *b ^= t; + } + } + )*) +} + +generate_integer_conditional_select!( u8 i8); +generate_integer_conditional_select!( u16 i16); +generate_integer_conditional_select!( u32 i32); +generate_integer_conditional_select!( u64 i64); +#[cfg(feature = "i128")] +generate_integer_conditional_select!(u128 i128); + +/// `Ordering` is `#[repr(i8)]` where: +/// +/// - `Less` => -1 +/// - `Equal` => 0 +/// - `Greater` => 1 +/// +/// Given this, it's possible to operate on orderings as if they're integers, +/// which allows leveraging conditional masking for predication. 
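Because `Ordering` is selected through the same masking machinery as the integer impls, a constant-time comparison can carry an `Ordering` around without branching on it. A small sketch using only APIs shown in this file; the selected values are arbitrary:

```rust
use std::cmp::Ordering;
use subtle::{Choice, ConditionallySelectable};

fn main() {
    // choice == 0 keeps the first operand, choice == 1 picks the second.
    let keep_first = Choice::from(0);
    let pick_second = Choice::from(1);

    assert_eq!(
        Ordering::conditional_select(&Ordering::Less, &Ordering::Greater, keep_first),
        Ordering::Less
    );
    assert_eq!(
        Ordering::conditional_select(&Ordering::Less, &Ordering::Greater, pick_second),
        Ordering::Greater
    );
}
```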
+impl ConditionallySelectable for cmp::Ordering { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + let a = *a as i8; + let b = *b as i8; + let ret = i8::conditional_select(&a, &b, choice); + + // SAFETY: `Ordering` is `#[repr(i8)]` and `ret` has been assigned to + // a value which was originally a valid `Ordering` then cast to `i8` + unsafe { *((&ret as *const _) as *const cmp::Ordering) } + } +} + +impl ConditionallySelectable for Choice { + #[inline] + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Choice(u8::conditional_select(&a.0, &b.0, choice)) + } +} + +#[cfg(feature = "const-generics")] +impl ConditionallySelectable for [T; N] +where + T: ConditionallySelectable, +{ + #[inline] + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + let mut output = *a; + output.conditional_assign(b, choice); + output + } + + fn conditional_assign(&mut self, other: &Self, choice: Choice) { + for (a_i, b_i) in self.iter_mut().zip(other) { + a_i.conditional_assign(b_i, choice) + } + } +} + +/// A type which can be conditionally negated in constant time. +/// +/// # Note +/// +/// A generic implementation of `ConditionallyNegatable` is provided +/// for types `T` which are `ConditionallySelectable` and have `Neg` +/// implemented on `&T`. +// +// #[inline] is specified on these function prototypes to signify that they +#[allow(unused_attributes)] // should be in the actual implementation +pub trait ConditionallyNegatable { + /// Negate `self` if `choice == Choice(1)`; otherwise, leave it + /// unchanged. + /// + /// This function should execute in constant time. + #[inline] + #[allow(unused_attributes)] + fn conditional_negate(&mut self, choice: Choice); +} + +impl ConditionallyNegatable for T +where + T: ConditionallySelectable, + for<'a> &'a T: Neg, +{ + #[inline] + fn conditional_negate(&mut self, choice: Choice) { + // Need to cast to eliminate mutability + let self_neg: T = -(self as &T); + self.conditional_assign(&self_neg, choice); + } +} + +/// The `CtOption` type represents an optional value similar to the +/// [`Option`](core::option::Option) type but is intended for +/// use in constant time APIs. +/// +/// Any given `CtOption` is either `Some` or `None`, but unlike +/// `Option` these variants are not exposed. The +/// [`is_some()`](CtOption::is_some) method is used to determine if +/// the value is `Some`, and [`unwrap_or()`](CtOption::unwrap_or) and +/// [`unwrap_or_else()`](CtOption::unwrap_or_else) methods are +/// provided to access the underlying value. The value can also be +/// obtained with [`unwrap()`](CtOption::unwrap) but this will panic +/// if it is `None`. +/// +/// Functions that are intended to be constant time may not produce +/// valid results for all inputs, such as square root and inversion +/// operations in finite field arithmetic. Returning an `Option` +/// from these functions makes it difficult for the caller to reason +/// about the result in constant time, and returning an incorrect +/// value burdens the caller and increases the chance of bugs. +#[derive(Clone, Copy, Debug)] +pub struct CtOption { + value: T, + is_some: Choice, +} + +impl From> for Option { + /// Convert the `CtOption` wrapper into an `Option`, depending on whether + /// the underlying `is_some` `Choice` was a `0` or a `1` once unwrapped. + /// + /// # Note + /// + /// This function exists to avoid ending up with ugly, verbose and/or bad handled + /// conversions from the `CtOption` wraps to an `Option` or `Result`. 
+ /// This implementation doesn't intend to be constant-time nor try to protect the + /// leakage of the `T` since the `Option` will do it anyways. + fn from(source: CtOption) -> Option { + if source.is_some().unwrap_u8() == 1u8 { + Option::Some(source.value) + } else { + None + } + } +} + +impl CtOption { + /// This method is used to construct a new `CtOption` and takes + /// a value of type `T`, and a `Choice` that determines whether + /// the optional value should be `Some` or not. If `is_some` is + /// false, the value will still be stored but its value is never + /// exposed. + #[inline] + pub fn new(value: T, is_some: Choice) -> CtOption { + CtOption { + value: value, + is_some: is_some, + } + } + + /// Returns the contained value, consuming the `self` value. + /// + /// # Panics + /// + /// Panics if the value is none with a custom panic message provided by + /// `msg`. + pub fn expect(self, msg: &str) -> T { + assert_eq!(self.is_some.unwrap_u8(), 1, "{}", msg); + + self.value + } + + /// This returns the underlying value but panics if it + /// is not `Some`. + #[inline] + pub fn unwrap(self) -> T { + assert_eq!(self.is_some.unwrap_u8(), 1); + + self.value + } + + /// This returns the underlying value if it is `Some` + /// or the provided value otherwise. + #[inline] + pub fn unwrap_or(self, def: T) -> T + where + T: ConditionallySelectable, + { + T::conditional_select(&def, &self.value, self.is_some) + } + + /// This returns the underlying value if it is `Some` + /// or the value produced by the provided closure otherwise. + /// + /// This operates in constant time, because the provided closure + /// is always called. + #[inline] + pub fn unwrap_or_else(self, f: F) -> T + where + T: ConditionallySelectable, + F: FnOnce() -> T, + { + T::conditional_select(&f(), &self.value, self.is_some) + } + + /// Returns a true `Choice` if this value is `Some`. + #[inline] + pub fn is_some(&self) -> Choice { + self.is_some + } + + /// Returns a true `Choice` if this value is `None`. + #[inline] + pub fn is_none(&self) -> Choice { + !self.is_some + } + + /// Returns a `None` value if the option is `None`, otherwise + /// returns a `CtOption` enclosing the value of the provided closure. + /// The closure is given the enclosed value or, if the option is + /// `None`, it is provided a dummy value computed using + /// `Default::default()`. + /// + /// This operates in constant time, because the provided closure + /// is always called. + #[inline] + pub fn map(self, f: F) -> CtOption + where + T: Default + ConditionallySelectable, + F: FnOnce(T) -> U, + { + CtOption::new( + f(T::conditional_select( + &T::default(), + &self.value, + self.is_some, + )), + self.is_some, + ) + } + + /// Returns a `None` value if the option is `None`, otherwise + /// returns the result of the provided closure. The closure is + /// given the enclosed value or, if the option is `None`, it + /// is provided a dummy value computed using `Default::default()`. + /// + /// This operates in constant time, because the provided closure + /// is always called. + #[inline] + pub fn and_then(self, f: F) -> CtOption + where + T: Default + ConditionallySelectable, + F: FnOnce(T) -> CtOption, + { + let mut tmp = f(T::conditional_select( + &T::default(), + &self.value, + self.is_some, + )); + tmp.is_some &= self.is_some; + + tmp + } + + /// Returns `self` if it contains a value, and otherwise returns the result of + /// calling `f`. The provided function `f` is always called. 
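The combinators above are meant to keep the `Some`/`None` state hidden until a caller explicitly leaves constant-time code. A short sketch of that flow; the `inv` helper (a stand-in for something like a field inversion) is hypothetical:

```rust
use subtle::{Choice, ConstantTimeEq, CtOption};

/// Hypothetical fallible operation: "fails" for zero, but always does the
/// same work and stores a dummy value in the `None` case.
fn inv(x: u64) -> CtOption<u64> {
    let is_nonzero = !x.ct_eq(&0);
    CtOption::new(x.wrapping_sub(1), is_nonzero)
}

fn main() {
    // Chain combinators without ever branching on is_some.
    let doubled = inv(3).map(|v| v * 2);
    assert_eq!(doubled.unwrap_or(0), 4);

    // Fall back to another CtOption, then convert at the API boundary.
    let fallback = inv(0).or_else(|| CtOption::new(7, Choice::from(1)));
    assert_eq!(Option::<u64>::from(fallback), Some(7));
}
```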
+ #[inline] + pub fn or_else(self, f: F) -> CtOption + where + T: ConditionallySelectable, + F: FnOnce() -> CtOption, + { + let is_none = self.is_none(); + let f = f(); + + Self::conditional_select(&self, &f, is_none) + } + + /// Convert the `CtOption` wrapper into an `Option`, depending on whether + /// the underlying `is_some` `Choice` was a `0` or a `1` once unwrapped. + /// + /// # Note + /// + /// This function exists to avoid ending up with ugly, verbose and/or bad handled + /// conversions from the `CtOption` wraps to an `Option` or `Result`. + /// This implementation doesn't intend to be constant-time nor try to protect the + /// leakage of the `T` since the `Option` will do it anyways. + /// + /// It's equivalent to the corresponding `From` impl, however this version is + /// friendlier for type inference. + pub fn into_option(self) -> Option { + self.into() + } +} + +impl ConditionallySelectable for CtOption { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + CtOption::new( + T::conditional_select(&a.value, &b.value, choice), + Choice::conditional_select(&a.is_some, &b.is_some, choice), + ) + } +} + +impl ConstantTimeEq for CtOption { + /// Two `CtOption`s are equal if they are both `Some` and + /// their values are equal, or both `None`. + #[inline] + fn ct_eq(&self, rhs: &CtOption) -> Choice { + let a = self.is_some(); + let b = rhs.is_some(); + + (a & b & self.value.ct_eq(&rhs.value)) | (!a & !b) + } +} + +/// A type which can be compared in some manner and be determined to be greater +/// than another of the same type. +pub trait ConstantTimeGreater { + /// Determine whether `self > other`. + /// + /// The bitwise-NOT of the return value of this function should be usable to + /// determine if `self <= other`. + /// + /// This function should execute in constant time. + /// + /// # Returns + /// + /// A `Choice` with a set bit if `self > other`, and with no set bits + /// otherwise. + /// + /// # Example + /// + /// ``` + /// use subtle::ConstantTimeGreater; + /// + /// let x: u8 = 13; + /// let y: u8 = 42; + /// + /// let x_gt_y = x.ct_gt(&y); + /// + /// assert_eq!(x_gt_y.unwrap_u8(), 0); + /// + /// let y_gt_x = y.ct_gt(&x); + /// + /// assert_eq!(y_gt_x.unwrap_u8(), 1); + /// + /// let x_gt_x = x.ct_gt(&x); + /// + /// assert_eq!(x_gt_x.unwrap_u8(), 0); + /// ``` + fn ct_gt(&self, other: &Self) -> Choice; +} + +macro_rules! generate_unsigned_integer_greater { + ($t_u: ty, $bit_width: expr) => { + impl ConstantTimeGreater for $t_u { + /// Returns Choice::from(1) iff x > y, and Choice::from(0) iff x <= y. + /// + /// # Note + /// + /// This algoritm would also work for signed integers if we first + /// flip the top bit, e.g. `let x: u8 = x ^ 0x80`, etc. + #[inline] + fn ct_gt(&self, other: &$t_u) -> Choice { + let gtb = self & !other; // All the bits in self that are greater than their corresponding bits in other. + let mut ltb = !self & other; // All the bits in self that are less than their corresponding bits in other. + let mut pow = 1; + + // Less-than operator is okay here because it's dependent on the bit-width. + while pow < $bit_width { + ltb |= ltb >> pow; // Bit-smear the highest set bit to the right. + pow += pow; + } + let mut bit = gtb & !ltb; // Select the highest set bit. + let mut pow = 1; + + while pow < $bit_width { + bit |= bit >> pow; // Shift it to the right until we end up with either 0 or 1. 
+ pow += pow; + } + // XXX We should possibly do the above flattening to 0 or 1 in the + // Choice constructor rather than making it a debug error? + Choice::from((bit & 1) as u8) + } + } + }; +} + +generate_unsigned_integer_greater!(u8, 8); +generate_unsigned_integer_greater!(u16, 16); +generate_unsigned_integer_greater!(u32, 32); +generate_unsigned_integer_greater!(u64, 64); +#[cfg(feature = "i128")] +generate_unsigned_integer_greater!(u128, 128); + +impl ConstantTimeGreater for cmp::Ordering { + #[inline] + fn ct_gt(&self, other: &Self) -> Choice { + // No impl of `ConstantTimeGreater` for `i8`, so use `u8` + let a = (*self as i8) + 1; + let b = (*other as i8) + 1; + (a as u8).ct_gt(&(b as u8)) + } +} + +/// A type which can be compared in some manner and be determined to be less +/// than another of the same type. +pub trait ConstantTimeLess: ConstantTimeEq + ConstantTimeGreater { + /// Determine whether `self < other`. + /// + /// The bitwise-NOT of the return value of this function should be usable to + /// determine if `self >= other`. + /// + /// A default implementation is provided and implemented for the unsigned + /// integer types. + /// + /// This function should execute in constant time. + /// + /// # Returns + /// + /// A `Choice` with a set bit if `self < other`, and with no set bits + /// otherwise. + /// + /// # Example + /// + /// ``` + /// use subtle::ConstantTimeLess; + /// + /// let x: u8 = 13; + /// let y: u8 = 42; + /// + /// let x_lt_y = x.ct_lt(&y); + /// + /// assert_eq!(x_lt_y.unwrap_u8(), 1); + /// + /// let y_lt_x = y.ct_lt(&x); + /// + /// assert_eq!(y_lt_x.unwrap_u8(), 0); + /// + /// let x_lt_x = x.ct_lt(&x); + /// + /// assert_eq!(x_lt_x.unwrap_u8(), 0); + /// ``` + #[inline] + fn ct_lt(&self, other: &Self) -> Choice { + !self.ct_gt(other) & !self.ct_eq(other) + } +} + +impl ConstantTimeLess for u8 {} +impl ConstantTimeLess for u16 {} +impl ConstantTimeLess for u32 {} +impl ConstantTimeLess for u64 {} +#[cfg(feature = "i128")] +impl ConstantTimeLess for u128 {} + +impl ConstantTimeLess for cmp::Ordering { + #[inline] + fn ct_lt(&self, other: &Self) -> Choice { + // No impl of `ConstantTimeLess` for `i8`, so use `u8` + let a = (*self as i8) + 1; + let b = (*other as i8) + 1; + (a as u8).ct_lt(&(b as u8)) + } +} + +/// Wrapper type which implements an optimization barrier for all accesses. +#[derive(Clone, Copy, Debug)] +pub struct BlackBox(T); + +impl BlackBox { + /// Constructs a new instance of `BlackBox` which will wrap the specified value. + /// + /// All access to the inner value will be mediated by a `black_box` optimization barrier. + pub fn new(value: T) -> Self { + Self(value) + } + + /// Read the inner value, applying an optimization barrier on access. 
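For completeness, a tiny usage sketch of the `BlackBox` wrapper documented above (the wrapped constant is arbitrary); the round-trip mirrors the crate's own test further down:

```rust
use subtle::BlackBox;

fn main() {
    // Wrap a sensitive intermediate; `get` consumes the wrapper and routes
    // the read through the optimization barrier.
    let mask = BlackBox::new(0xA5A5_A5A5u32);
    assert_eq!(mask.get(), 0xA5A5_A5A5);
}
```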
+ pub fn get(self) -> T { + black_box(self.0) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/tests/mod.rs b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/tests/mod.rs new file mode 100644 index 000000000000..888b9d0de7d8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/subtle-2.6.1/tests/mod.rs @@ -0,0 +1,432 @@ +use std::cmp; + +use rand::rngs::OsRng; +use rand::RngCore; + +use subtle::*; + +#[test] +#[should_panic] +fn slices_equal_different_lengths() { + let a: [u8; 3] = [0, 0, 0]; + let b: [u8; 4] = [0, 0, 0, 0]; + + assert_eq!((&a).ct_eq(&b).unwrap_u8(), 1); +} + +#[test] +fn slices_equal() { + let a: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8]; + let b: [u8; 8] = [1, 2, 3, 4, 4, 3, 2, 1]; + + let a_eq_a = (&a).ct_eq(&a); + let a_eq_b = (&a).ct_eq(&b); + + assert_eq!(a_eq_a.unwrap_u8(), 1); + assert_eq!(a_eq_b.unwrap_u8(), 0); + + let c: [u8; 16] = [0u8; 16]; + + let a_eq_c = (&a).ct_eq(&c); + assert_eq!(a_eq_c.unwrap_u8(), 0); +} + +#[test] +fn conditional_assign_i32() { + let mut a: i32 = 5; + let b: i32 = 13; + + a.conditional_assign(&b, 0.into()); + assert_eq!(a, 5); + a.conditional_assign(&b, 1.into()); + assert_eq!(a, 13); +} + +#[test] +fn conditional_assign_i64() { + let mut c: i64 = 2343249123; + let d: i64 = 8723884895; + + c.conditional_assign(&d, 0.into()); + assert_eq!(c, 2343249123); + c.conditional_assign(&d, 1.into()); + assert_eq!(c, 8723884895); +} + +macro_rules! generate_integer_conditional_select_tests { + ($($t:ty)*) => ($( + let x: $t = 0; // all 0 bits + let y: $t = !0; // all 1 bits + + assert_eq!(<$t>::conditional_select(&x, &y, 0.into()), x); + assert_eq!(<$t>::conditional_select(&x, &y, 1.into()), y); + + let mut z = x; + let mut w = y; + + <$t>::conditional_swap(&mut z, &mut w, 0.into()); + assert_eq!(z, x); + assert_eq!(w, y); + <$t>::conditional_swap(&mut z, &mut w, 1.into()); + assert_eq!(z, y); + assert_eq!(w, x); + + z.conditional_assign(&x, 1.into()); + w.conditional_assign(&y, 0.into()); + assert_eq!(z, x); + assert_eq!(w, x); + )*) +} + +#[test] +fn integer_conditional_select() { + generate_integer_conditional_select_tests!(u8 u16 u32 u64); + generate_integer_conditional_select_tests!(i8 i16 i32 i64); + #[cfg(feature = "i128")] + generate_integer_conditional_select_tests!(i128 u128); +} + +#[test] +fn custom_conditional_select_i16() { + let x: i16 = 257; + let y: i16 = 514; + + assert_eq!(i16::conditional_select(&x, &y, 0.into()), 257); + assert_eq!(i16::conditional_select(&x, &y, 1.into()), 514); +} + +#[test] +fn ordering_conditional_select() { + assert_eq!( + cmp::Ordering::conditional_select(&cmp::Ordering::Less, &cmp::Ordering::Greater, 0.into()), + cmp::Ordering::Less + ); + + assert_eq!( + cmp::Ordering::conditional_select(&cmp::Ordering::Less, &cmp::Ordering::Greater, 1.into()), + cmp::Ordering::Greater + ); +} + +macro_rules! 
generate_integer_equal_tests { + ($($t:ty),*) => ($( + let y: $t = 0; // all 0 bits + let z: $t = !0; // all 1 bits + + let x = z; + + assert_eq!(x.ct_eq(&y).unwrap_u8(), 0); + assert_eq!(x.ct_eq(&z).unwrap_u8(), 1); + assert_eq!(x.ct_ne(&y).unwrap_u8(), 1); + assert_eq!(x.ct_ne(&z).unwrap_u8(), 0); + )*) +} + +#[test] +fn integer_equal() { + generate_integer_equal_tests!(u8, u16, u32, u64); + generate_integer_equal_tests!(i8, i16, i32, i64); + #[cfg(feature = "i128")] + generate_integer_equal_tests!(i128, u128); + generate_integer_equal_tests!(isize, usize); +} + +#[test] +fn choice_into_bool() { + let choice_true: bool = Choice::from(1).into(); + + assert!(choice_true); + + let choice_false: bool = Choice::from(0).into(); + + assert!(!choice_false); +} + +#[test] +fn conditional_select_choice() { + let t = Choice::from(1); + let f = Choice::from(0); + + assert_eq!(bool::from(Choice::conditional_select(&t, &f, f)), true); + assert_eq!(bool::from(Choice::conditional_select(&t, &f, t)), false); + assert_eq!(bool::from(Choice::conditional_select(&f, &t, f)), false); + assert_eq!(bool::from(Choice::conditional_select(&f, &t, t)), true); +} + +#[test] +fn choice_equal() { + assert!(Choice::from(0).ct_eq(&Choice::from(0)).unwrap_u8() == 1); + assert!(Choice::from(0).ct_eq(&Choice::from(1)).unwrap_u8() == 0); + assert!(Choice::from(1).ct_eq(&Choice::from(0)).unwrap_u8() == 0); + assert!(Choice::from(1).ct_eq(&Choice::from(1)).unwrap_u8() == 1); +} + +#[test] +fn ordering_equal() { + let a = cmp::Ordering::Equal; + let b = cmp::Ordering::Greater; + let c = a; + + assert_eq!(a.ct_eq(&b).unwrap_u8(), 0); + assert_eq!(a.ct_eq(&c).unwrap_u8(), 1); +} + +#[test] +fn test_ctoption() { + let a = CtOption::new(10, Choice::from(1)); + let b = CtOption::new(9, Choice::from(1)); + let c = CtOption::new(10, Choice::from(0)); + let d = CtOption::new(9, Choice::from(0)); + + // Test is_some / is_none + assert!(bool::from(a.is_some())); + assert!(bool::from(!a.is_none())); + assert!(bool::from(b.is_some())); + assert!(bool::from(!b.is_none())); + assert!(bool::from(!c.is_some())); + assert!(bool::from(c.is_none())); + assert!(bool::from(!d.is_some())); + assert!(bool::from(d.is_none())); + + // Test unwrap for Some + assert_eq!(a.unwrap(), 10); + assert_eq!(b.unwrap(), 9); + + // Test equality + assert!(bool::from(a.ct_eq(&a))); + assert!(bool::from(!a.ct_eq(&b))); + assert!(bool::from(!a.ct_eq(&c))); + assert!(bool::from(!a.ct_eq(&d))); + + // Test equality of None with different + // dummy value + assert!(bool::from(c.ct_eq(&d))); + + // Test unwrap_or + assert_eq!(CtOption::new(1, Choice::from(1)).unwrap_or(2), 1); + assert_eq!(CtOption::new(1, Choice::from(0)).unwrap_or(2), 2); + + // Test unwrap_or_else + assert_eq!(CtOption::new(1, Choice::from(1)).unwrap_or_else(|| 2), 1); + assert_eq!(CtOption::new(1, Choice::from(0)).unwrap_or_else(|| 2), 2); + + // Test map + assert_eq!( + CtOption::new(1, Choice::from(1)) + .map(|v| { + assert_eq!(v, 1); + 2 + }) + .unwrap(), + 2 + ); + assert_eq!( + CtOption::new(1, Choice::from(0)) + .map(|_| 2) + .is_none() + .unwrap_u8(), + 1 + ); + + // Test and_then + assert_eq!( + CtOption::new(1, Choice::from(1)) + .and_then(|v| { + assert_eq!(v, 1); + CtOption::new(2, Choice::from(0)) + }) + .is_none() + .unwrap_u8(), + 1 + ); + assert_eq!( + CtOption::new(1, Choice::from(1)) + .and_then(|v| { + assert_eq!(v, 1); + CtOption::new(2, Choice::from(1)) + }) + .unwrap(), + 2 + ); + + assert_eq!( + CtOption::new(1, Choice::from(0)) + .and_then(|_| CtOption::new(2, 
Choice::from(0))) + .is_none() + .unwrap_u8(), + 1 + ); + assert_eq!( + CtOption::new(1, Choice::from(0)) + .and_then(|_| CtOption::new(2, Choice::from(1))) + .is_none() + .unwrap_u8(), + 1 + ); + + // Test or_else + assert_eq!( + CtOption::new(1, Choice::from(0)) + .or_else(|| CtOption::new(2, Choice::from(1))) + .unwrap(), + 2 + ); + assert_eq!( + CtOption::new(1, Choice::from(1)) + .or_else(|| CtOption::new(2, Choice::from(0))) + .unwrap(), + 1 + ); + assert_eq!( + CtOption::new(1, Choice::from(1)) + .or_else(|| CtOption::new(2, Choice::from(1))) + .unwrap(), + 1 + ); + assert!(bool::from( + CtOption::new(1, Choice::from(0)) + .or_else(|| CtOption::new(2, Choice::from(0))) + .is_none() + )); + + // Test (in)equality + assert!(CtOption::new(1, Choice::from(0)).ct_eq(&CtOption::new(1, Choice::from(1))).unwrap_u8() == 0); + assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(1, Choice::from(0))).unwrap_u8() == 0); + assert!(CtOption::new(1, Choice::from(0)).ct_eq(&CtOption::new(2, Choice::from(1))).unwrap_u8() == 0); + assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(2, Choice::from(0))).unwrap_u8() == 0); + assert!(CtOption::new(1, Choice::from(0)).ct_eq(&CtOption::new(1, Choice::from(0))).unwrap_u8() == 1); + assert!(CtOption::new(1, Choice::from(0)).ct_eq(&CtOption::new(2, Choice::from(0))).unwrap_u8() == 1); + assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(2, Choice::from(1))).unwrap_u8() == 0); + assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(2, Choice::from(1))).unwrap_u8() == 0); + assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(1, Choice::from(1))).unwrap_u8() == 1); + assert!(CtOption::new(1, Choice::from(1)).ct_eq(&CtOption::new(1, Choice::from(1))).unwrap_u8() == 1); +} + +#[test] +#[should_panic] +fn unwrap_none_ctoption() { + // This test might fail (in release mode?) if the + // compiler decides to optimize it away. + CtOption::new(10, Choice::from(0)).unwrap(); +} + +macro_rules! generate_greater_than_test { + ($ty: ty) => { + for _ in 0..100 { + let x = OsRng.next_u64() as $ty; + let y = OsRng.next_u64() as $ty; + let z = x.ct_gt(&y); + + println!("x={}, y={}, z={:?}", x, y, z); + + if x < y { + assert!(z.unwrap_u8() == 0); + } else if x == y { + assert!(z.unwrap_u8() == 0); + } else if x > y { + assert!(z.unwrap_u8() == 1); + } + } + } +} + +#[test] +fn greater_than_u8() { + generate_greater_than_test!(u8); +} + +#[test] +fn greater_than_u16() { + generate_greater_than_test!(u16); +} + +#[test] +fn greater_than_u32() { + generate_greater_than_test!(u32); +} + +#[test] +fn greater_than_u64() { + generate_greater_than_test!(u64); +} + +#[cfg(feature = "i128")] +#[test] +fn greater_than_u128() { + generate_greater_than_test!(u128); +} + +#[test] +fn greater_than_ordering() { + assert_eq!(cmp::Ordering::Less.ct_gt(&cmp::Ordering::Greater).unwrap_u8(), 0); + assert_eq!(cmp::Ordering::Greater.ct_gt(&cmp::Ordering::Less).unwrap_u8(), 1); +} + +#[test] +/// Test that the two's compliment min and max, i.e. 0000...0001 < 1111...1110, +/// gives the correct result. (This fails using the bit-twiddling algorithm that +/// go/crypto/subtle uses.) +fn less_than_twos_compliment_minmax() { + let z = 1u32.ct_lt(&(2u32.pow(31)-1)); + + assert!(z.unwrap_u8() == 1); +} + +macro_rules! 
generate_less_than_test { + ($ty: ty) => { + for _ in 0..100 { + let x = OsRng.next_u64() as $ty; + let y = OsRng.next_u64() as $ty; + let z = x.ct_gt(&y); + + println!("x={}, y={}, z={:?}", x, y, z); + + if x < y { + assert!(z.unwrap_u8() == 0); + } else if x == y { + assert!(z.unwrap_u8() == 0); + } else if x > y { + assert!(z.unwrap_u8() == 1); + } + } + } +} + +#[test] +fn less_than_u8() { + generate_less_than_test!(u8); +} + +#[test] +fn less_than_u16() { + generate_less_than_test!(u16); +} + +#[test] +fn less_than_u32() { + generate_less_than_test!(u32); +} + +#[test] +fn less_than_u64() { + generate_less_than_test!(u64); +} + +#[cfg(feature = "i128")] +#[test] +fn less_than_u128() { + generate_less_than_test!(u128); +} + +#[test] +fn less_than_ordering() { + assert_eq!(cmp::Ordering::Greater.ct_lt(&cmp::Ordering::Less).unwrap_u8(), 0); + assert_eq!(cmp::Ordering::Less.ct_lt(&cmp::Ordering::Greater).unwrap_u8(), 1); +} + +#[test] +fn black_box_round_trip() { + let n = 42u64; + let black_box = BlackBox::new(n); + assert_eq!(n, black_box.get()); +} diff --git a/third_party/rust/chromium_crates_io/vendor/unsigned-varint-0.7.1/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/unsigned-varint-0.7.1/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/.cargo_vcs_info.json new file mode 100644 index 000000000000..d3b07db1581c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "875d166da5858b86122961ce6360acc771572d21" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/Cargo.toml new file mode 100644 index 000000000000..50ceb9192dbb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/Cargo.toml @@ -0,0 +1,65 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "synstructure" +version = "0.13.1" +authors = ["Nika Layzell "] +include = [ + "src/**/*", + "Cargo.toml", + "README.md", + "LICENSE", +] +description = "Helper methods and macros for custom derives" +documentation = "https://docs.rs/synstructure" +readme = "README.md" +keywords = [ + "syn", + "macros", + "derive", + "expand_substructure", + "enum", +] +license = "MIT" +repository = "https://github.com/mystor/synstructure" + +[dependencies.proc-macro2] +version = "1.0.60" +default-features = false + +[dependencies.quote] +version = "1" +default-features = false + +[dependencies.syn] +version = "2" +features = [ + "derive", + "parsing", + "printing", + "clone-impls", + "visit", + "extra-traits", +] +default-features = false + +[dev-dependencies.synstructure_test_traits] +version = "0.1" + +[features] +default = ["proc-macro"] +proc-macro = [ + "proc-macro2/proc-macro", + "syn/proc-macro", + "quote/proc-macro", +] diff --git a/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/Cargo.toml.orig new file mode 100644 index 000000000000..458b5cde7250 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/Cargo.toml.orig @@ -0,0 +1,35 @@ +[package] +name = "synstructure" +version = "0.13.1" +authors = ["Nika Layzell "] +edition = "2018" + +description = "Helper methods and macros for custom derives" +documentation = "https://docs.rs/synstructure" +repository = "https://github.com/mystor/synstructure" +readme = "README.md" +license = "MIT" +keywords = ["syn", "macros", "derive", "expand_substructure", "enum"] + +include = ["src/**/*", "Cargo.toml", "README.md", "LICENSE"] + +[features] +default = ["proc-macro"] +proc-macro = ["proc-macro2/proc-macro", "syn/proc-macro", "quote/proc-macro"] + +[dependencies] +proc-macro2 = { version = "1.0.60", default-features = false } +quote = { version = "1", default-features = false } + +[dependencies.syn] +version = "2" +default-features = false +features = ["derive", "parsing", "printing", "clone-impls", "visit", "extra-traits"] + +[dev-dependencies] +# Used in the documentation as an example trait crate provider. Unfortunately, +# we need to publish this in order to be able to publish synstructure. +synstructure_test_traits = { version = "0.1", path = "test_traits" } + +[workspace] +members = ["test_traits", "test_suite", "test_suite/test_macros"] diff --git a/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/LICENSE b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/LICENSE new file mode 100644 index 000000000000..f78f1c15d129 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/LICENSE @@ -0,0 +1,7 @@ +Copyright 2016 Nika Layzell + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/README.md b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/README.md new file mode 100644 index 000000000000..1eefc1ed7314 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/README.md @@ -0,0 +1,157 @@ +# synstructure + +[![Latest Version](https://img.shields.io/crates/v/synstructure.svg)](https://crates.io/crates/synstructure) +[![Documentation](https://docs.rs/synstructure/badge.svg)](https://docs.rs/synstructure) +[![Build Status](https://travis-ci.org/mystor/synstructure.svg?branch=master)](https://travis-ci.org/mystor/synstructure) +[![Rustc Version 1.31+](https://img.shields.io/badge/rustc-1.31+-lightgray.svg)](https://blog.rust-lang.org/2018/12/06/Rust-1.31-and-rust-2018.html) + +> NOTE: What follows is an exerpt from the module level documentation. For full +> details read the docs on [docs.rs](https://docs.rs/synstructure/) + +This crate provides helper types for matching against enum variants, and +extracting bindings to each of the fields in the deriving Struct or Enum in +a generic way. + +If you are writing a `#[derive]` which needs to perform some operation on +every field, then you have come to the right place! + +# Example: `WalkFields` +### Trait Implementation +```rust +pub trait WalkFields: std::any::Any { + fn walk_fields(&self, walk: &mut FnMut(&WalkFields)); +} +impl WalkFields for i32 { + fn walk_fields(&self, _walk: &mut FnMut(&WalkFields)) {} +} +``` + +### Custom Derive +```rust +#[macro_use] +extern crate synstructure; +#[macro_use] +extern crate quote; +extern crate proc_macro2; + +fn walkfields_derive(s: synstructure::Structure) -> proc_macro2::TokenStream { + let body = s.each(|bi| quote!{ + walk(#bi) + }); + + s.bound_impl(quote!(example_traits::WalkFields), quote!{ + fn walk_fields(&self, walk: &mut FnMut(&example_traits::WalkFields)) { + match *self { #body } + } + }) +} +decl_derive!([WalkFields] => walkfields_derive); + +/* + * Test Case + */ +fn main() { + test_derive! 
{ + walkfields_derive { + enum A { + B(i32, T), + C(i32), + } + } + expands to { + const _: () = { + extern crate example_traits; + impl example_traits::WalkFields for A + where T: example_traits::WalkFields + { + fn walk_fields(&self, walk: &mut FnMut(&example_traits::WalkFields)) { + match *self { + A::B(ref __binding_0, ref __binding_1,) => { + { walk(__binding_0) } + { walk(__binding_1) } + } + A::C(ref __binding_0,) => { + { walk(__binding_0) } + } + } + } + } + }; + } + } +} +``` + +# Example: `Interest` +### Trait Implementation +```rust +pub trait Interest { + fn interesting(&self) -> bool; +} +impl Interest for i32 { + fn interesting(&self) -> bool { *self > 0 } +} +``` + +### Custom Derive +```rust +#[macro_use] +extern crate synstructure; +#[macro_use] +extern crate quote; +extern crate proc_macro2; + +fn interest_derive(mut s: synstructure::Structure) -> proc_macro2::TokenStream { + let body = s.fold(false, |acc, bi| quote!{ + #acc || example_traits::Interest::interesting(#bi) + }); + + s.bound_impl(quote!(example_traits::Interest), quote!{ + fn interesting(&self) -> bool { + match *self { + #body + } + } + }) +} +decl_derive!([Interest] => interest_derive); + +/* + * Test Case + */ +fn main() { + test_derive!{ + interest_derive { + enum A { + B(i32, T), + C(i32), + } + } + expands to { + const _: () = { + extern crate example_traits; + impl example_traits::Interest for A + where T: example_traits::Interest + { + fn interesting(&self) -> bool { + match *self { + A::B(ref __binding_0, ref __binding_1,) => { + false || + example_traits::Interest::interesting(__binding_0) || + example_traits::Interest::interesting(__binding_1) + } + A::C(ref __binding_0,) => { + false || + example_traits::Interest::interesting(__binding_0) + } + } + } + } + }; + } + } +} +``` + +For more example usage, consider investigating the `abomonation_derive` crate, +which makes use of this crate, and is fairly simple. diff --git a/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/src/lib.rs new file mode 100644 index 000000000000..0a63a39c27af --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/src/lib.rs @@ -0,0 +1,2556 @@ +//! This crate provides helper types for matching against enum variants, and +//! extracting bindings to each of the fields in the deriving Struct or Enum in +//! a generic way. +//! +//! If you are writing a `#[derive]` which needs to perform some operation on +//! every field, then you have come to the right place! +//! +//! # Example: `WalkFields` +//! ### Trait Implementation +//! ``` +//! pub trait WalkFields: std::any::Any { +//! fn walk_fields(&self, walk: &mut FnMut(&WalkFields)); +//! } +//! impl WalkFields for i32 { +//! fn walk_fields(&self, _walk: &mut FnMut(&WalkFields)) {} +//! } +//! ``` +//! +//! ### Custom Derive +//! ``` +//! # use quote::quote; +//! fn walkfields_derive(s: synstructure::Structure) -> proc_macro2::TokenStream { +//! let body = s.each(|bi| quote!{ +//! walk(#bi) +//! }); +//! +//! s.gen_impl(quote! { +//! extern crate synstructure_test_traits; +//! +//! gen impl synstructure_test_traits::WalkFields for @Self { +//! fn walk_fields(&self, walk: &mut FnMut(&synstructure_test_traits::WalkFields)) { +//! match *self { #body } +//! } +//! } +//! }) +//! } +//! # const _IGNORE: &'static str = stringify!( +//! synstructure::decl_derive!([WalkFields] => walkfields_derive); +//! # ); +//! +//! /* +//! * Test Case +//! */ +//! fn main() { +//! 
synstructure::test_derive! { +//! walkfields_derive { +//! enum A { +//! B(i32, T), +//! C(i32), +//! } +//! } +//! expands to { +//! const _: () = { +//! extern crate synstructure_test_traits; +//! impl synstructure_test_traits::WalkFields for A +//! where T: synstructure_test_traits::WalkFields +//! { +//! fn walk_fields(&self, walk: &mut FnMut(&synstructure_test_traits::WalkFields)) { +//! match *self { +//! A::B(ref __binding_0, ref __binding_1,) => { +//! { walk(__binding_0) } +//! { walk(__binding_1) } +//! } +//! A::C(ref __binding_0,) => { +//! { walk(__binding_0) } +//! } +//! } +//! } +//! } +//! }; +//! } +//! } +//! } +//! ``` +//! +//! # Example: `Interest` +//! ### Trait Implementation +//! ``` +//! pub trait Interest { +//! fn interesting(&self) -> bool; +//! } +//! impl Interest for i32 { +//! fn interesting(&self) -> bool { *self > 0 } +//! } +//! ``` +//! +//! ### Custom Derive +//! ``` +//! # use quote::quote; +//! fn interest_derive(mut s: synstructure::Structure) -> proc_macro2::TokenStream { +//! let body = s.fold(false, |acc, bi| quote!{ +//! #acc || synstructure_test_traits::Interest::interesting(#bi) +//! }); +//! +//! s.gen_impl(quote! { +//! extern crate synstructure_test_traits; +//! gen impl synstructure_test_traits::Interest for @Self { +//! fn interesting(&self) -> bool { +//! match *self { +//! #body +//! } +//! } +//! } +//! }) +//! } +//! # const _IGNORE: &'static str = stringify!( +//! synstructure::decl_derive!([Interest] => interest_derive); +//! # ); +//! +//! /* +//! * Test Case +//! */ +//! fn main() { +//! synstructure::test_derive!{ +//! interest_derive { +//! enum A { +//! B(i32, T), +//! C(i32), +//! } +//! } +//! expands to { +//! const _: () = { +//! extern crate synstructure_test_traits; +//! impl synstructure_test_traits::Interest for A +//! where T: synstructure_test_traits::Interest +//! { +//! fn interesting(&self) -> bool { +//! match *self { +//! A::B(ref __binding_0, ref __binding_1,) => { +//! false || +//! synstructure_test_traits::Interest::interesting(__binding_0) || +//! synstructure_test_traits::Interest::interesting(__binding_1) +//! } +//! A::C(ref __binding_0,) => { +//! false || +//! synstructure_test_traits::Interest::interesting(__binding_0) +//! } +//! } +//! } +//! } +//! }; +//! } +//! } +//! } +//! ``` +//! +//! For more example usage, consider investigating the `abomonation_derive` crate, +//! which makes use of this crate, and is fairly simple. + +#![allow( + clippy::default_trait_access, + clippy::missing_errors_doc, + clippy::missing_panics_doc, + clippy::must_use_candidate, + clippy::needless_pass_by_value +)] + +#[cfg(all( + not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), + feature = "proc-macro" +))] +extern crate proc_macro; + +use std::collections::HashSet; + +use syn::parse::{ParseStream, Parser}; +use syn::visit::{self, Visit}; +use syn::{ + braced, punctuated, token, Attribute, Data, DeriveInput, Error, Expr, Field, Fields, + FieldsNamed, FieldsUnnamed, GenericParam, Generics, Ident, PredicateType, Result, Token, + TraitBound, Type, TypeMacro, TypeParamBound, TypePath, WhereClause, WherePredicate, +}; + +use quote::{format_ident, quote_spanned, ToTokens}; +// re-export the quote! macro so we can depend on it being around in our macro's +// implementations. 
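As a quick orientation for the module docs above, this is roughly what a downstream proc-macro crate's `lib.rs` looks like when it wires a derive through `decl_derive!`, `fold`, and `gen_impl`. The trait path `my_traits::HeapSize` is made up for the sketch, and such a crate would need `synstructure`, `quote`, and `proc-macro2` as dependencies:

```rust
use quote::quote;
use synstructure::decl_derive;

fn heap_size_derive(s: synstructure::Structure) -> proc_macro2::TokenStream {
    // Sum a per-field quantity across every variant's bindings.
    let body = s.fold(quote!(0usize), |acc, bi| {
        quote!(#acc + my_traits::HeapSize::heap_size(#bi))
    });

    s.gen_impl(quote! {
        gen impl my_traits::HeapSize for @Self {
            fn heap_size(&self) -> usize {
                match *self { #body }
            }
        }
    })
}

decl_derive!([HeapSize] => heap_size_derive);
```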
+#[doc(hidden)] +pub use quote::quote; + +use proc_macro2::{Span, TokenStream, TokenTree}; + +// NOTE: This module has documentation hidden, as it only exports macros (which +// always appear in the root of the crate) and helper methods / re-exports used +// in the implementation of those macros. +#[doc(hidden)] +pub mod macros; + +/// Changes how bounds are added +#[allow(clippy::manual_non_exhaustive)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum AddBounds { + /// Add for fields and generics + Both, + /// Fields only + Fields, + /// Generics only + Generics, + /// None + None, + #[doc(hidden)] + __Nonexhaustive, +} + +/// The type of binding to use when generating a pattern. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum BindStyle { + /// `x` + Move, + /// `mut x` + MoveMut, + /// `ref x` + Ref, + /// `ref mut x` + RefMut, +} + +impl ToTokens for BindStyle { + fn to_tokens(&self, tokens: &mut TokenStream) { + match self { + BindStyle::Move => {} + BindStyle::MoveMut => quote_spanned!(Span::call_site() => mut).to_tokens(tokens), + BindStyle::Ref => quote_spanned!(Span::call_site() => ref).to_tokens(tokens), + BindStyle::RefMut => quote_spanned!(Span::call_site() => ref mut).to_tokens(tokens), + } + } +} + +// Internal method for merging seen_generics arrays together. +fn generics_fuse(res: &mut Vec, new: &[bool]) { + for (i, &flag) in new.iter().enumerate() { + if i == res.len() { + res.push(false); + } + if flag { + res[i] = true; + } + } +} + +// Internal method for extracting the set of generics which have been matched. +fn fetch_generics<'a>(set: &[bool], generics: &'a Generics) -> Vec<&'a Ident> { + let mut tys = vec![]; + for (&seen, param) in set.iter().zip(generics.params.iter()) { + if seen { + if let GenericParam::Type(tparam) = param { + tys.push(&tparam.ident); + } + } + } + tys +} + +// Internal method to merge two Generics objects together intelligently. +fn merge_generics(into: &mut Generics, from: &Generics) -> Result<()> { + // Try to add the param into `into`, and merge parmas with identical names. + for p in &from.params { + for op in &into.params { + match (op, p) { + (GenericParam::Type(otp), GenericParam::Type(tp)) => { + // NOTE: This is only OK because syn ignores the span for equality purposes. + if otp.ident == tp.ident { + return Err(Error::new_spanned( + p, + format!( + "Attempted to merge conflicting generic parameters: {} and {}", + quote!(#op), + quote!(#p) + ), + )); + } + } + (GenericParam::Lifetime(olp), GenericParam::Lifetime(lp)) => { + // NOTE: This is only OK because syn ignores the span for equality purposes. + if olp.lifetime == lp.lifetime { + return Err(Error::new_spanned( + p, + format!( + "Attempted to merge conflicting generic parameters: {} and {}", + quote!(#op), + quote!(#p) + ), + )); + } + } + // We don't support merging Const parameters, because that wouldn't make much sense. + _ => (), + } + } + into.params.push(p.clone()); + } + + // Add any where clauses from the input generics object. + if let Some(from_clause) = &from.where_clause { + into.make_where_clause() + .predicates + .extend(from_clause.predicates.iter().cloned()); + } + + Ok(()) +} + +/// Helper method which does the same thing as rustc 1.20's +/// `Option::get_or_insert_with`. This method is used to keep backwards +/// compatibility with rustc 1.15. 
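`AddBounds` (defined just above) controls which where-clause bounds the generated impl picks up. A hedged sketch of how a derive might restrict bound generation; the `my_traits::Marker` trait is hypothetical:

```rust
use quote::quote;
use synstructure::{AddBounds, Structure};

fn marker_derive(mut s: Structure) -> proc_macro2::TokenStream {
    // Bound only the type parameters themselves, not every field type.
    s.add_bounds(AddBounds::Generics);

    s.gen_impl(quote! {
        gen impl my_traits::Marker for @Self {}
    })
}
```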
+fn get_or_insert_with(opt: &mut Option, f: F) -> &mut T +where + F: FnOnce() -> T, +{ + if opt.is_none() { + *opt = Some(f()); + } + + match opt { + Some(v) => v, + None => unreachable!(), + } +} + +/// Information about a specific binding. This contains both an `Ident` +/// reference to the given field, and the syn `&'a Field` descriptor for that +/// field. +/// +/// This type supports `quote::ToTokens`, so can be directly used within the +/// `quote!` macro. It expands to a reference to the matched field. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct BindingInfo<'a> { + /// The name which this BindingInfo will bind to. + pub binding: Ident, + + /// The type of binding which this BindingInfo will create. + pub style: BindStyle, + + field: &'a Field, + + // These are used to determine which type parameters are avaliable. + generics: &'a Generics, + seen_generics: Vec, + // The original index of the binding + // this will not change when .filter() is called + index: usize, +} + +impl<'a> ToTokens for BindingInfo<'a> { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.binding.to_tokens(tokens); + } +} + +impl<'a> BindingInfo<'a> { + /// Returns a reference to the underlying `syn` AST node which this + /// `BindingInfo` references + pub fn ast(&self) -> &'a Field { + self.field + } + + /// Generates the pattern fragment for this field binding. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B{ a: i32, b: i32 }, + /// C(u32), + /// } + /// }; + /// let s = Structure::new(&di); + /// + /// assert_eq!( + /// s.variants()[0].bindings()[0].pat().to_string(), + /// quote! { + /// ref __binding_0 + /// }.to_string() + /// ); + /// ``` + pub fn pat(&self) -> TokenStream { + let BindingInfo { binding, style, .. } = self; + quote!(#style #binding) + } + + /// Returns a list of the type parameters which are referenced in this + /// field's type. + /// + /// # Caveat + /// + /// If the field contains any macros in type position, all parameters will + /// be considered bound. This is because we cannot determine which type + /// parameters are bound by type macros. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// struct A { + /// a: Option, + /// b: U, + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// assert_eq!( + /// s.variants()[0].bindings()[0].referenced_ty_params(), + /// &["e::format_ident!("T")] + /// ); + /// ``` + pub fn referenced_ty_params(&self) -> Vec<&'a Ident> { + fetch_generics(&self.seen_generics, self.generics) + } +} + +/// This type is similar to `syn`'s `Variant` type, however each of the fields +/// are references rather than owned. When this is used as the AST for a real +/// variant, this struct simply borrows the fields of the `syn::Variant`, +/// however this type may also be used as the sole variant for a struct. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct VariantAst<'a> { + pub attrs: &'a [Attribute], + pub ident: &'a Ident, + pub fields: &'a Fields, + pub discriminant: &'a Option<(token::Eq, Expr)>, +} + +/// A wrapper around a `syn::DeriveInput`'s variant which provides utilities +/// for destructuring `Variant`s with `match` expressions. 
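Pulling the binding machinery above together, here is a standalone check of what `Structure::new` produces for a small enum. It mirrors the documented examples and assumes `syn` (with parsing/printing features), `quote`, and `proc-macro2` are available:

```rust
use quote::quote;
use synstructure::Structure;

fn main() {
    let di: syn::DeriveInput = syn::parse_quote! {
        enum A {
            B { a: i32, b: i32 },
            C(u32),
        }
    };
    let s = Structure::new(&di);

    // Every field is bound as `ref __binding_N` by default.
    assert_eq!(
        s.variants()[0].bindings()[0].pat().to_string(),
        quote!(ref __binding_0).to_string()
    );
}
```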
+#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct VariantInfo<'a> { + pub prefix: Option<&'a Ident>, + bindings: Vec>, + ast: VariantAst<'a>, + generics: &'a Generics, + // The original length of `bindings` before any `.filter()` calls + original_length: usize, +} + +/// Helper function used by the `VariantInfo` constructor. Walks all of the types +/// in `field` and returns a list of the type parameters from `ty_params` which +/// are referenced in the field. +fn get_ty_params(field: &Field, generics: &Generics) -> Vec { + // Helper type. Discovers all identifiers inside of the visited type, + // and calls a callback with them. + struct BoundTypeLocator<'a> { + result: Vec, + generics: &'a Generics, + } + + impl<'a> Visit<'a> for BoundTypeLocator<'a> { + // XXX: This also (intentionally) captures paths like T::SomeType. Is + // this desirable? + fn visit_ident(&mut self, id: &Ident) { + for (idx, i) in self.generics.params.iter().enumerate() { + if let GenericParam::Type(tparam) = i { + if tparam.ident == *id { + self.result[idx] = true; + } + } + } + } + + fn visit_type_macro(&mut self, x: &'a TypeMacro) { + // If we see a type_mac declaration, then we can't know what type parameters + // it might be binding, so we presume it binds all of them. + for r in &mut self.result { + *r = true; + } + visit::visit_type_macro(self, x); + } + } + + let mut btl = BoundTypeLocator { + result: vec![false; generics.params.len()], + generics, + }; + + btl.visit_type(&field.ty); + + btl.result +} + +impl<'a> VariantInfo<'a> { + fn new(ast: VariantAst<'a>, prefix: Option<&'a Ident>, generics: &'a Generics) -> Self { + let bindings = match ast.fields { + Fields::Unit => vec![], + Fields::Unnamed(FieldsUnnamed { + unnamed: fields, .. + }) + | Fields::Named(FieldsNamed { named: fields, .. }) => { + fields + .into_iter() + .enumerate() + .map(|(i, field)| { + BindingInfo { + // XXX: This has to be call_site to avoid privacy + // when deriving on private fields. + binding: format_ident!("__binding_{}", i), + style: BindStyle::Ref, + field, + generics, + seen_generics: get_ty_params(field, generics), + index: i, + } + }) + .collect::>() + } + }; + + let original_length = bindings.len(); + VariantInfo { + prefix, + bindings, + ast, + generics, + original_length, + } + } + + /// Returns a slice of the bindings in this Variant. + pub fn bindings(&self) -> &[BindingInfo<'a>] { + &self.bindings + } + + /// Returns a mut slice of the bindings in this Variant. + pub fn bindings_mut(&mut self) -> &mut [BindingInfo<'a>] { + &mut self.bindings + } + + /// Returns a `VariantAst` object which contains references to the + /// underlying `syn` AST node which this `Variant` was created from. + pub fn ast(&self) -> VariantAst<'a> { + self.ast + } + + /// True if any bindings were omitted due to a `filter` call. + pub fn omitted_bindings(&self) -> bool { + self.original_length != self.bindings.len() + } + + /// Generates the match-arm pattern which could be used to match against this Variant. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! 
{ + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// let s = Structure::new(&di); + /// + /// assert_eq!( + /// s.variants()[0].pat().to_string(), + /// quote!{ + /// A::B(ref __binding_0, ref __binding_1,) + /// }.to_string() + /// ); + /// ``` + pub fn pat(&self) -> TokenStream { + let mut t = TokenStream::new(); + if let Some(prefix) = self.prefix { + prefix.to_tokens(&mut t); + quote!(::).to_tokens(&mut t); + } + self.ast.ident.to_tokens(&mut t); + match self.ast.fields { + Fields::Unit => { + assert!(self.bindings.is_empty()); + } + Fields::Unnamed(..) => token::Paren(Span::call_site()).surround(&mut t, |t| { + let mut expected_index = 0; + for binding in &self.bindings { + while expected_index < binding.index { + quote!(_,).to_tokens(t); + expected_index += 1; + } + binding.pat().to_tokens(t); + quote!(,).to_tokens(t); + expected_index += 1; + } + if expected_index != self.original_length { + quote!(..).to_tokens(t); + } + }), + Fields::Named(..) => token::Brace(Span::call_site()).surround(&mut t, |t| { + for binding in &self.bindings { + binding.field.ident.to_tokens(t); + quote!(:).to_tokens(t); + binding.pat().to_tokens(t); + quote!(,).to_tokens(t); + } + if self.omitted_bindings() { + quote!(..).to_tokens(t); + } + }), + } + t + } + + /// Generates the token stream required to construct the current variant. + /// + /// The init array initializes each of the fields in the order they are + /// written in `variant.ast().fields`. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(usize, usize), + /// C{ v: usize }, + /// } + /// }; + /// let s = Structure::new(&di); + /// + /// assert_eq!( + /// s.variants()[0].construct(|_, i| quote!(#i)).to_string(), + /// + /// quote!{ + /// A::B(0usize, 1usize,) + /// }.to_string() + /// ); + /// + /// assert_eq!( + /// s.variants()[1].construct(|_, i| quote!(#i)).to_string(), + /// + /// quote!{ + /// A::C{ v: 0usize, } + /// }.to_string() + /// ); + /// ``` + pub fn construct(&self, mut func: F) -> TokenStream + where + F: FnMut(&Field, usize) -> T, + T: ToTokens, + { + let mut t = TokenStream::new(); + if let Some(prefix) = self.prefix { + quote!(#prefix ::).to_tokens(&mut t); + } + self.ast.ident.to_tokens(&mut t); + + match &self.ast.fields { + Fields::Unit => (), + Fields::Unnamed(FieldsUnnamed { unnamed, .. }) => { + token::Paren::default().surround(&mut t, |t| { + for (i, field) in unnamed.into_iter().enumerate() { + func(field, i).to_tokens(t); + quote!(,).to_tokens(t); + } + }); + } + Fields::Named(FieldsNamed { named, .. }) => { + token::Brace::default().surround(&mut t, |t| { + for (i, field) in named.into_iter().enumerate() { + field.ident.to_tokens(t); + quote!(:).to_tokens(t); + func(field, i).to_tokens(t); + quote!(,).to_tokens(t); + } + }); + } + } + t + } + + /// Runs the passed-in function once for each bound field, passing in a `BindingInfo`. + /// and generating a `match` arm which evaluates the returned tokens. + /// + /// This method will ignore fields which are ignored through the `filter` + /// method. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! 
{ + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// let s = Structure::new(&di); + /// + /// assert_eq!( + /// s.variants()[0].each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B(ref __binding_0, ref __binding_1,) => { + /// { println!("{:?}", __binding_0) } + /// { println!("{:?}", __binding_1) } + /// } + /// }.to_string() + /// ); + /// ``` + pub fn each(&self, mut f: F) -> TokenStream + where + F: FnMut(&BindingInfo<'_>) -> R, + R: ToTokens, + { + let pat = self.pat(); + let mut body = TokenStream::new(); + for binding in &self.bindings { + token::Brace::default().surround(&mut body, |body| { + f(binding).to_tokens(body); + }); + } + quote!(#pat => { #body }) + } + + /// Runs the passed-in function once for each bound field, passing in the + /// result of the previous call, and a `BindingInfo`. generating a `match` + /// arm which evaluates to the resulting tokens. + /// + /// This method will ignore fields which are ignored through the `filter` + /// method. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// let s = Structure::new(&di); + /// + /// assert_eq!( + /// s.variants()[0].fold(quote!(0), |acc, bi| quote!(#acc + #bi)).to_string(), + /// + /// quote!{ + /// A::B(ref __binding_0, ref __binding_1,) => { + /// 0 + __binding_0 + __binding_1 + /// } + /// }.to_string() + /// ); + /// ``` + pub fn fold(&self, init: I, mut f: F) -> TokenStream + where + F: FnMut(TokenStream, &BindingInfo<'_>) -> R, + I: ToTokens, + R: ToTokens, + { + let pat = self.pat(); + let body = self.bindings.iter().fold(quote!(#init), |i, bi| { + let r = f(i, bi); + quote!(#r) + }); + quote!(#pat => { #body }) + } + + /// Filter the bindings created by this `Variant` object. This has 2 effects: + /// + /// * The bindings will no longer appear in match arms generated by methods + /// on this `Variant` or its subobjects. + /// + /// * Impl blocks created with the `bound_impl` or `unsafe_bound_impl` + /// method only consider type parameters referenced in the types of + /// non-filtered fields. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B{ a: i32, b: i32 }, + /// C{ a: u32 }, + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.variants_mut()[0].filter(|bi| { + /// bi.ast().ident == Some(quote::format_ident!("b")) + /// }); + /// + /// assert_eq!( + /// s.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B{ b: ref __binding_1, .. } => { + /// { println!("{:?}", __binding_1) } + /// } + /// A::C{ a: ref __binding_0, } => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// ``` + pub fn filter(&mut self, f: F) -> &mut Self + where + F: FnMut(&BindingInfo<'_>) -> bool, + { + self.bindings.retain(f); + self + } + + /// Iterates all the bindings of this `Variant` object and uses a closure to determine if a + /// binding should be removed. If the closure returns `true` the binding is removed from the + /// variant. If the closure returns `false`, the binding remains in the variant. + /// + /// All the removed bindings are moved to a new `Variant` object which is otherwise identical + /// to the current one. To understand the effects of removing a binding from a variant check + /// the [`VariantInfo::filter`] documentation. 
+ /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B{ a: i32, b: i32 }, + /// C{ a: u32 }, + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// let mut with_b = &mut s.variants_mut()[0]; + /// + /// let with_a = with_b.drain_filter(|bi| { + /// bi.ast().ident == Some(quote::format_ident!("a")) + /// }); + /// + /// assert_eq!( + /// with_a.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B{ a: ref __binding_0, .. } => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// + /// assert_eq!( + /// with_b.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B{ b: ref __binding_1, .. } => { + /// { println!("{:?}", __binding_1) } + /// } + /// }.to_string() + /// ); + /// ``` + #[allow(clippy::return_self_not_must_use)] + pub fn drain_filter(&mut self, mut f: F) -> Self + where + F: FnMut(&BindingInfo<'_>) -> bool, + { + let mut other = VariantInfo { + prefix: self.prefix, + bindings: vec![], + ast: self.ast, + generics: self.generics, + original_length: self.original_length, + }; + + let (other_bindings, self_bindings) = self.bindings.drain(..).partition(&mut f); + other.bindings = other_bindings; + self.bindings = self_bindings; + + other + } + + /// Remove the binding at the given index. + /// + /// # Panics + /// + /// Panics if the index is out of range. + pub fn remove_binding(&mut self, idx: usize) -> &mut Self { + self.bindings.remove(idx); + self + } + + /// Updates the `BindStyle` for each of the passed-in fields by calling the + /// passed-in function for each `BindingInfo`. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.variants_mut()[0].bind_with(|bi| BindStyle::RefMut); + /// + /// assert_eq!( + /// s.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B(ref mut __binding_0, ref mut __binding_1,) => { + /// { println!("{:?}", __binding_0) } + /// { println!("{:?}", __binding_1) } + /// } + /// A::C(ref __binding_0,) => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// ``` + pub fn bind_with(&mut self, mut f: F) -> &mut Self + where + F: FnMut(&BindingInfo<'_>) -> BindStyle, + { + for binding in &mut self.bindings { + binding.style = f(binding); + } + self + } + + /// Updates the binding name for each fo the passed-in fields by calling the + /// passed-in function for each `BindingInfo`. + /// + /// The function will be called with the `BindingInfo` and its index in the + /// enclosing variant. + /// + /// The default name is `__binding_{}` where `{}` is replaced with an + /// increasing number. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! 
{ + /// enum A { + /// B{ a: i32, b: i32 }, + /// C{ a: u32 }, + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.variants_mut()[0].binding_name(|bi, i| bi.ident.clone().unwrap()); + /// + /// assert_eq!( + /// s.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B{ a: ref a, b: ref b, } => { + /// { println!("{:?}", a) } + /// { println!("{:?}", b) } + /// } + /// A::C{ a: ref __binding_0, } => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// ``` + pub fn binding_name(&mut self, mut f: F) -> &mut Self + where + F: FnMut(&Field, usize) -> Ident, + { + for (it, binding) in self.bindings.iter_mut().enumerate() { + binding.binding = f(binding.field, it); + } + self + } + + /// Returns a list of the type parameters which are referenced in this + /// field's type. + /// + /// # Caveat + /// + /// If the field contains any macros in type position, all parameters will + /// be considered bound. This is because we cannot determine which type + /// parameters are bound by type macros. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// struct A { + /// a: Option, + /// b: U, + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// assert_eq!( + /// s.variants()[0].bindings()[0].referenced_ty_params(), + /// &["e::format_ident!("T")] + /// ); + /// ``` + pub fn referenced_ty_params(&self) -> Vec<&'a Ident> { + let mut flags = Vec::new(); + for binding in &self.bindings { + generics_fuse(&mut flags, &binding.seen_generics); + } + fetch_generics(&flags, self.generics) + } +} + +/// A wrapper around a `syn::DeriveInput` which provides utilities for creating +/// custom derive trait implementations. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Structure<'a> { + variants: Vec>, + omitted_variants: bool, + ast: &'a DeriveInput, + extra_impl: Vec, + extra_predicates: Vec, + add_bounds: AddBounds, +} + +impl<'a> Structure<'a> { + /// Create a new `Structure` with the variants and fields from the passed-in + /// `DeriveInput`. + /// + /// # Panics + /// + /// This method will panic if the provided AST node represents an untagged + /// union. + pub fn new(ast: &'a DeriveInput) -> Self { + Self::try_new(ast).expect("Unable to create synstructure::Structure") + } + + /// Create a new `Structure` with the variants and fields from the passed-in + /// `DeriveInput`. + /// + /// Unlike `Structure::new`, this method does not panic if the provided AST + /// node represents an untagged union. + pub fn try_new(ast: &'a DeriveInput) -> Result { + let variants = match &ast.data { + Data::Enum(data) => (&data.variants) + .into_iter() + .map(|v| { + VariantInfo::new( + VariantAst { + attrs: &v.attrs, + ident: &v.ident, + fields: &v.fields, + discriminant: &v.discriminant, + }, + Some(&ast.ident), + &ast.generics, + ) + }) + .collect::>(), + Data::Struct(data) => { + vec![VariantInfo::new( + VariantAst { + attrs: &ast.attrs, + ident: &ast.ident, + fields: &data.fields, + discriminant: &None, + }, + None, + &ast.generics, + )] + } + Data::Union(_) => { + return Err(Error::new_spanned( + ast, + "unexpected unsupported untagged union", + )); + } + }; + + Ok(Structure { + variants, + omitted_variants: false, + ast, + extra_impl: vec![], + extra_predicates: vec![], + add_bounds: AddBounds::Both, + }) + } + + /// Returns a slice of the variants in this Structure. 
+ pub fn variants(&self) -> &[VariantInfo<'a>] { + &self.variants + } + + /// Returns a mut slice of the variants in this Structure. + pub fn variants_mut(&mut self) -> &mut [VariantInfo<'a>] { + &mut self.variants + } + + /// Returns a reference to the underlying `syn` AST node which this + /// `Structure` was created from. + pub fn ast(&self) -> &'a DeriveInput { + self.ast + } + + /// True if any variants were omitted due to a `filter_variants` call. + pub fn omitted_variants(&self) -> bool { + self.omitted_variants + } + + /// Runs the passed-in function once for each bound field, passing in a `BindingInfo`. + /// and generating `match` arms which evaluate the returned tokens. + /// + /// This method will ignore variants or fields which are ignored through the + /// `filter` and `filter_variant` methods. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// let s = Structure::new(&di); + /// + /// assert_eq!( + /// s.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B(ref __binding_0, ref __binding_1,) => { + /// { println!("{:?}", __binding_0) } + /// { println!("{:?}", __binding_1) } + /// } + /// A::C(ref __binding_0,) => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// ``` + pub fn each(&self, mut f: F) -> TokenStream + where + F: FnMut(&BindingInfo<'_>) -> R, + R: ToTokens, + { + let mut t = TokenStream::new(); + for variant in &self.variants { + variant.each(&mut f).to_tokens(&mut t); + } + if self.omitted_variants { + quote!(_ => {}).to_tokens(&mut t); + } + t + } + + /// Runs the passed-in function once for each bound field, passing in the + /// result of the previous call, and a `BindingInfo`. generating `match` + /// arms which evaluate to the resulting tokens. + /// + /// This method will ignore variants or fields which are ignored through the + /// `filter` and `filter_variant` methods. + /// + /// If a variant has been ignored, it will return the `init` value. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// let s = Structure::new(&di); + /// + /// assert_eq!( + /// s.fold(quote!(0), |acc, bi| quote!(#acc + #bi)).to_string(), + /// + /// quote!{ + /// A::B(ref __binding_0, ref __binding_1,) => { + /// 0 + __binding_0 + __binding_1 + /// } + /// A::C(ref __binding_0,) => { + /// 0 + __binding_0 + /// } + /// }.to_string() + /// ); + /// ``` + pub fn fold(&self, init: I, mut f: F) -> TokenStream + where + F: FnMut(TokenStream, &BindingInfo<'_>) -> R, + I: ToTokens, + R: ToTokens, + { + let mut t = TokenStream::new(); + for variant in &self.variants { + variant.fold(&init, &mut f).to_tokens(&mut t); + } + if self.omitted_variants { + quote!(_ => { #init }).to_tokens(&mut t); + } + t + } + + /// Runs the passed-in function once for each variant, passing in a + /// `VariantInfo`. and generating `match` arms which evaluate the returned + /// tokens. + /// + /// This method will ignore variants and not bind fields which are ignored + /// through the `filter` and `filter_variant` methods. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! 
{ + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// let s = Structure::new(&di); + /// + /// assert_eq!( + /// s.each_variant(|v| { + /// let name = &v.ast().ident; + /// quote!(println!(stringify!(#name))) + /// }).to_string(), + /// + /// quote!{ + /// A::B(ref __binding_0, ref __binding_1,) => { + /// println!(stringify!(B)) + /// } + /// A::C(ref __binding_0,) => { + /// println!(stringify!(C)) + /// } + /// }.to_string() + /// ); + /// ``` + pub fn each_variant(&self, mut f: F) -> TokenStream + where + F: FnMut(&VariantInfo<'_>) -> R, + R: ToTokens, + { + let mut t = TokenStream::new(); + for variant in &self.variants { + let pat = variant.pat(); + let body = f(variant); + quote!(#pat => { #body }).to_tokens(&mut t); + } + if self.omitted_variants { + quote!(_ => {}).to_tokens(&mut t); + } + t + } + + /// Filter the bindings created by this `Structure` object. This has 2 effects: + /// + /// * The bindings will no longer appear in match arms generated by methods + /// on this `Structure` or its subobjects. + /// + /// * Impl blocks created with the `bound_impl` or `unsafe_bound_impl` + /// method only consider type parameters referenced in the types of + /// non-filtered fields. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B{ a: i32, b: i32 }, + /// C{ a: u32 }, + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.filter(|bi| { + /// bi.ast().ident == Some(quote::format_ident!("a")) + /// }); + /// + /// assert_eq!( + /// s.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B{ a: ref __binding_0, .. } => { + /// { println!("{:?}", __binding_0) } + /// } + /// A::C{ a: ref __binding_0, } => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// ``` + pub fn filter(&mut self, mut f: F) -> &mut Self + where + F: FnMut(&BindingInfo<'_>) -> bool, + { + for variant in &mut self.variants { + variant.filter(&mut f); + } + self + } + + /// Iterates all the bindings of this `Structure` object and uses a closure to determine if a + /// binding should be removed. If the closure returns `true` the binding is removed from the + /// structure. If the closure returns `false`, the binding remains in the structure. + /// + /// All the removed bindings are moved to a new `Structure` object which is otherwise identical + /// to the current one. To understand the effects of removing a binding from a structure check + /// the [`Structure::filter`] documentation. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B{ a: i32, b: i32 }, + /// C{ a: u32 }, + /// } + /// }; + /// let mut with_b = Structure::new(&di); + /// + /// let with_a = with_b.drain_filter(|bi| { + /// bi.ast().ident == Some(quote::format_ident!("a")) + /// }); + /// + /// assert_eq!( + /// with_a.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B{ a: ref __binding_0, .. } => { + /// { println!("{:?}", __binding_0) } + /// } + /// A::C{ a: ref __binding_0, } => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// + /// assert_eq!( + /// with_b.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B{ b: ref __binding_1, .. } => { + /// { println!("{:?}", __binding_1) } + /// } + /// A::C{ .. 
} => { + /// + /// } + /// }.to_string() + /// ); + /// ``` + #[allow(clippy::return_self_not_must_use)] + pub fn drain_filter(&mut self, mut f: F) -> Self + where + F: FnMut(&BindingInfo<'_>) -> bool, + { + Self { + variants: self + .variants + .iter_mut() + .map(|variant| variant.drain_filter(&mut f)) + .collect(), + omitted_variants: self.omitted_variants, + ast: self.ast, + extra_impl: self.extra_impl.clone(), + extra_predicates: self.extra_predicates.clone(), + add_bounds: self.add_bounds, + } + } + + /// Specify additional where predicate bounds which should be generated by + /// impl-generating functions such as `gen_impl`, `bound_impl`, and + /// `unsafe_bound_impl`. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(T), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// // Add an additional where predicate. + /// s.add_where_predicate(syn::parse_quote!(T: std::fmt::Display)); + /// + /// assert_eq!( + /// s.bound_impl(quote!(krate::Trait), quote!{ + /// fn a() {} + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// impl krate::Trait for A + /// where T: std::fmt::Display, + /// T: krate::Trait, + /// Option: krate::Trait, + /// U: krate::Trait + /// { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// ``` + pub fn add_where_predicate(&mut self, pred: WherePredicate) -> &mut Self { + self.extra_predicates.push(pred); + self + } + + /// Specify which bounds should be generated by impl-generating functions + /// such as `gen_impl`, `bound_impl`, and `unsafe_bound_impl`. + /// + /// The default behaviour is to generate both field and generic bounds from + /// type parameters. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(T), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// // Limit bounds to only generics. + /// s.add_bounds(AddBounds::Generics); + /// + /// assert_eq!( + /// s.bound_impl(quote!(krate::Trait), quote!{ + /// fn a() {} + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// impl krate::Trait for A + /// where T: krate::Trait, + /// U: krate::Trait + /// { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// ``` + pub fn add_bounds(&mut self, mode: AddBounds) -> &mut Self { + self.add_bounds = mode; + self + } + + /// Filter the variants matched by this `Structure` object. This has 2 effects: + /// + /// * Match arms destructuring these variants will no longer be generated by + /// methods on this `Structure` + /// + /// * Impl blocks created with the `bound_impl` or `unsafe_bound_impl` + /// method only consider type parameters referenced in the types of + /// fields in non-fitered variants. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! 
{ + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// + /// let mut s = Structure::new(&di); + /// + /// s.filter_variants(|v| v.ast().ident != "B"); + /// + /// assert_eq!( + /// s.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::C(ref __binding_0,) => { + /// { println!("{:?}", __binding_0) } + /// } + /// _ => {} + /// }.to_string() + /// ); + /// ``` + pub fn filter_variants(&mut self, f: F) -> &mut Self + where + F: FnMut(&VariantInfo<'_>) -> bool, + { + let before_len = self.variants.len(); + self.variants.retain(f); + if self.variants.len() != before_len { + self.omitted_variants = true; + } + self + } + /// Iterates all the variants of this `Structure` object and uses a closure to determine if a + /// variant should be removed. If the closure returns `true` the variant is removed from the + /// structure. If the closure returns `false`, the variant remains in the structure. + /// + /// All the removed variants are moved to a new `Structure` object which is otherwise identical + /// to the current one. To understand the effects of removing a variant from a structure check + /// the [`Structure::filter_variants`] documentation. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// + /// let mut with_c = Structure::new(&di); + /// + /// let with_b = with_c.drain_filter_variants(|v| v.ast().ident == "B"); + /// + /// assert_eq!( + /// with_c.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::C(ref __binding_0,) => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// + /// assert_eq!( + /// with_b.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B(ref __binding_0, ref __binding_1,) => { + /// { println!("{:?}", __binding_0) } + /// { println!("{:?}", __binding_1) } + /// } + /// }.to_string() + /// ); + #[allow(clippy::return_self_not_must_use)] + pub fn drain_filter_variants(&mut self, mut f: F) -> Self + where + F: FnMut(&VariantInfo<'_>) -> bool, + { + let mut other = Self { + variants: vec![], + omitted_variants: self.omitted_variants, + ast: self.ast, + extra_impl: self.extra_impl.clone(), + extra_predicates: self.extra_predicates.clone(), + add_bounds: self.add_bounds, + }; + + let (other_variants, self_variants) = self.variants.drain(..).partition(&mut f); + other.variants = other_variants; + self.variants = self_variants; + + other + } + + /// Remove the variant at the given index. + /// + /// # Panics + /// + /// Panics if the index is out of range. + pub fn remove_variant(&mut self, idx: usize) -> &mut Self { + self.variants.remove(idx); + self.omitted_variants = true; + self + } + + /// Updates the `BindStyle` for each of the passed-in fields by calling the + /// passed-in function for each `BindingInfo`. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! 
{ + /// enum A { + /// B(i32, i32), + /// C(u32), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.bind_with(|bi| BindStyle::RefMut); + /// + /// assert_eq!( + /// s.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B(ref mut __binding_0, ref mut __binding_1,) => { + /// { println!("{:?}", __binding_0) } + /// { println!("{:?}", __binding_1) } + /// } + /// A::C(ref mut __binding_0,) => { + /// { println!("{:?}", __binding_0) } + /// } + /// }.to_string() + /// ); + /// ``` + pub fn bind_with(&mut self, mut f: F) -> &mut Self + where + F: FnMut(&BindingInfo<'_>) -> BindStyle, + { + for variant in &mut self.variants { + variant.bind_with(&mut f); + } + self + } + + /// Updates the binding name for each fo the passed-in fields by calling the + /// passed-in function for each `BindingInfo`. + /// + /// The function will be called with the `BindingInfo` and its index in the + /// enclosing variant. + /// + /// The default name is `__binding_{}` where `{}` is replaced with an + /// increasing number. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B{ a: i32, b: i32 }, + /// C{ a: u32 }, + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.binding_name(|bi, i| bi.ident.clone().unwrap()); + /// + /// assert_eq!( + /// s.each(|bi| quote!(println!("{:?}", #bi))).to_string(), + /// + /// quote!{ + /// A::B{ a: ref a, b: ref b, } => { + /// { println!("{:?}", a) } + /// { println!("{:?}", b) } + /// } + /// A::C{ a: ref a, } => { + /// { println!("{:?}", a) } + /// } + /// }.to_string() + /// ); + /// ``` + pub fn binding_name(&mut self, mut f: F) -> &mut Self + where + F: FnMut(&Field, usize) -> Ident, + { + for variant in &mut self.variants { + variant.binding_name(&mut f); + } + self + } + + /// Returns a list of the type parameters which are refrenced in the types + /// of non-filtered fields / variants. + /// + /// # Caveat + /// + /// If the struct contains any macros in type position, all parameters will + /// be considered bound. This is because we cannot determine which type + /// parameters are bound by type macros. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(T, i32), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.filter_variants(|v| v.ast().ident != "C"); + /// + /// assert_eq!( + /// s.referenced_ty_params(), + /// &["e::format_ident!("T")] + /// ); + /// ``` + pub fn referenced_ty_params(&self) -> Vec<&'a Ident> { + let mut flags = Vec::new(); + for variant in &self.variants { + for binding in &variant.bindings { + generics_fuse(&mut flags, &binding.seen_generics); + } + } + fetch_generics(&flags, &self.ast.generics) + } + + /// Adds an `impl<>` generic parameter. + /// This can be used when the trait to be derived needs some extra generic parameters. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! 
{ + /// enum A { + /// B(T), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// let generic: syn::GenericParam = syn::parse_quote!(X: krate::AnotherTrait); + /// + /// assert_eq!( + /// s.add_impl_generic(generic) + /// .bound_impl(quote!(krate::Trait), + /// quote!{ + /// fn a() {} + /// } + /// ).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// impl krate::Trait for A + /// where T : krate :: Trait < X >, + /// Option: krate::Trait, + /// U: krate::Trait + /// { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// ``` + pub fn add_impl_generic(&mut self, param: GenericParam) -> &mut Self { + self.extra_impl.push(param); + self + } + + /// Add trait bounds for a trait with the given path for each type parmaeter + /// referenced in the types of non-filtered fields. + /// + /// # Caveat + /// + /// If the method contains any macros in type position, all parameters will + /// be considered bound. This is because we cannot determine which type + /// parameters are bound by type macros. + pub fn add_trait_bounds( + &self, + bound: &TraitBound, + where_clause: &mut Option, + mode: AddBounds, + ) { + // If we have any explicit where predicates, make sure to add them first. + if !self.extra_predicates.is_empty() { + let clause = get_or_insert_with(&mut *where_clause, || WhereClause { + where_token: Default::default(), + predicates: punctuated::Punctuated::new(), + }); + clause + .predicates + .extend(self.extra_predicates.iter().cloned()); + } + + let mut seen = HashSet::new(); + let mut pred = |ty: Type| { + if !seen.contains(&ty) { + seen.insert(ty.clone()); + + // Add a predicate. + let clause = get_or_insert_with(&mut *where_clause, || WhereClause { + where_token: Default::default(), + predicates: punctuated::Punctuated::new(), + }); + clause.predicates.push(WherePredicate::Type(PredicateType { + lifetimes: None, + bounded_ty: ty, + colon_token: Default::default(), + bounds: Some(punctuated::Pair::End(TypeParamBound::Trait(bound.clone()))) + .into_iter() + .collect(), + })); + } + }; + + for variant in &self.variants { + for binding in &variant.bindings { + match mode { + AddBounds::Both | AddBounds::Fields => { + for &seen in &binding.seen_generics { + if seen { + pred(binding.ast().ty.clone()); + break; + } + } + } + _ => {} + } + + match mode { + AddBounds::Both | AddBounds::Generics => { + for param in binding.referenced_ty_params() { + pred(Type::Path(TypePath { + qself: None, + path: (*param).clone().into(), + })); + } + } + _ => {} + } + } + } + } + + /// This method is a no-op, underscore consts are used by default now. + pub fn underscore_const(&mut self, _enabled: bool) -> &mut Self { + self + } + + /// > NOTE: This methods' features are superceded by `Structure::gen_impl`. + /// + /// Creates an `impl` block with the required generic type fields filled in + /// to implement the trait `path`. + /// + /// This method also adds where clauses to the impl requiring that all + /// referenced type parmaeters implement the trait `path`. + /// + /// # Hygiene and Paths + /// + /// This method wraps the impl block inside of a `const` (see the example + /// below). In this scope, the first segment of the passed-in path is + /// `extern crate`-ed in. If you don't want to generate that `extern crate` + /// item, use a global path. 
+ /// + /// This means that if you are implementing `my_crate::Trait`, you simply + /// write `s.bound_impl(quote!(my_crate::Trait), quote!(...))`, and for the + /// entirety of the definition, you can refer to your crate as `my_crate`. + /// + /// # Caveat + /// + /// If the method contains any macros in type position, all parameters will + /// be considered bound. This is because we cannot determine which type + /// parameters are bound by type macros. + /// + /// # Panics + /// + /// Panics if the path string parameter is not a valid `TraitBound`. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(T), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.filter_variants(|v| v.ast().ident != "B"); + /// + /// assert_eq!( + /// s.bound_impl(quote!(krate::Trait), quote!{ + /// fn a() {} + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// impl krate::Trait for A + /// where Option: krate::Trait, + /// U: krate::Trait + /// { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// ``` + pub fn bound_impl(&self, path: P, body: B) -> TokenStream { + self.impl_internal( + path.into_token_stream(), + body.into_token_stream(), + quote!(), + None, + ) + } + + /// > NOTE: This methods' features are superceded by `Structure::gen_impl`. + /// + /// Creates an `impl` block with the required generic type fields filled in + /// to implement the unsafe trait `path`. + /// + /// This method also adds where clauses to the impl requiring that all + /// referenced type parmaeters implement the trait `path`. + /// + /// # Hygiene and Paths + /// + /// This method wraps the impl block inside of a `const` (see the example + /// below). In this scope, the first segment of the passed-in path is + /// `extern crate`-ed in. If you don't want to generate that `extern crate` + /// item, use a global path. + /// + /// This means that if you are implementing `my_crate::Trait`, you simply + /// write `s.bound_impl(quote!(my_crate::Trait), quote!(...))`, and for the + /// entirety of the definition, you can refer to your crate as `my_crate`. + /// + /// # Caveat + /// + /// If the method contains any macros in type position, all parameters will + /// be considered bound. This is because we cannot determine which type + /// parameters are bound by type macros. + /// + /// # Panics + /// + /// Panics if the path string parameter is not a valid `TraitBound`. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(T), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.filter_variants(|v| v.ast().ident != "B"); + /// + /// assert_eq!( + /// s.unsafe_bound_impl(quote!(krate::Trait), quote!{ + /// fn a() {} + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// unsafe impl krate::Trait for A + /// where Option: krate::Trait, + /// U: krate::Trait + /// { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// ``` + pub fn unsafe_bound_impl(&self, path: P, body: B) -> TokenStream { + self.impl_internal( + path.into_token_stream(), + body.into_token_stream(), + quote!(unsafe), + None, + ) + } + + /// > NOTE: This methods' features are superceded by `Structure::gen_impl`. + /// + /// Creates an `impl` block with the required generic type fields filled in + /// to implement the trait `path`. 
+ /// + /// This method will not add any where clauses to the impl. + /// + /// # Hygiene and Paths + /// + /// This method wraps the impl block inside of a `const` (see the example + /// below). In this scope, the first segment of the passed-in path is + /// `extern crate`-ed in. If you don't want to generate that `extern crate` + /// item, use a global path. + /// + /// This means that if you are implementing `my_crate::Trait`, you simply + /// write `s.bound_impl(quote!(my_crate::Trait), quote!(...))`, and for the + /// entirety of the definition, you can refer to your crate as `my_crate`. + /// + /// # Panics + /// + /// Panics if the path string parameter is not a valid `TraitBound`. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(T), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.filter_variants(|v| v.ast().ident != "B"); + /// + /// assert_eq!( + /// s.unbound_impl(quote!(krate::Trait), quote!{ + /// fn a() {} + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// impl krate::Trait for A { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// ``` + pub fn unbound_impl(&self, path: P, body: B) -> TokenStream { + self.impl_internal( + path.into_token_stream(), + body.into_token_stream(), + quote!(), + Some(AddBounds::None), + ) + } + + /// > NOTE: This methods' features are superceded by `Structure::gen_impl`. + /// + /// Creates an `impl` block with the required generic type fields filled in + /// to implement the unsafe trait `path`. + /// + /// This method will not add any where clauses to the impl. + /// + /// # Hygiene and Paths + /// + /// This method wraps the impl block inside of a `const` (see the example + /// below). In this scope, the first segment of the passed-in path is + /// `extern crate`-ed in. If you don't want to generate that `extern crate` + /// item, use a global path. + /// + /// This means that if you are implementing `my_crate::Trait`, you simply + /// write `s.bound_impl(quote!(my_crate::Trait), quote!(...))`, and for the + /// entirety of the definition, you can refer to your crate as `my_crate`. + /// + /// # Panics + /// + /// Panics if the path string parameter is not a valid `TraitBound`. + /// + /// # Example + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! 
{ + /// enum A { + /// B(T), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.filter_variants(|v| v.ast().ident != "B"); + /// + /// assert_eq!( + /// s.unsafe_unbound_impl(quote!(krate::Trait), quote!{ + /// fn a() {} + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// unsafe impl krate::Trait for A { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// ``` + #[deprecated] + pub fn unsafe_unbound_impl(&self, path: P, body: B) -> TokenStream { + self.impl_internal( + path.into_token_stream(), + body.into_token_stream(), + quote!(unsafe), + Some(AddBounds::None), + ) + } + + fn impl_internal( + &self, + path: TokenStream, + body: TokenStream, + safety: TokenStream, + mode: Option, + ) -> TokenStream { + let mode = mode.unwrap_or(self.add_bounds); + let name = &self.ast.ident; + let mut gen_clone = self.ast.generics.clone(); + gen_clone.params.extend(self.extra_impl.iter().cloned()); + let (impl_generics, _, _) = gen_clone.split_for_impl(); + let (_, ty_generics, where_clause) = self.ast.generics.split_for_impl(); + + let bound = syn::parse2::(path) + .expect("`path` argument must be a valid rust trait bound"); + + let mut where_clause = where_clause.cloned(); + self.add_trait_bounds(&bound, &mut where_clause, mode); + + // This function is smart. If a global path is passed, no extern crate + // statement will be generated, however, a relative path will cause the + // crate which it is relative to to be imported within the current + // scope. + let mut extern_crate = quote!(); + if bound.path.leading_colon.is_none() { + if let Some(seg) = bound.path.segments.first() { + let seg = &seg.ident; + extern_crate = quote! { extern crate #seg; }; + } + } + + let generated = quote! { + #extern_crate + #safety impl #impl_generics #bound for #name #ty_generics #where_clause { + #body + } + }; + + quote! { + const _: () = { #generated }; + } + } + + /// Generate an impl block for the given struct. This impl block will + /// automatically use hygiene tricks to avoid polluting the caller's + /// namespace, and will automatically add trait bounds for generic type + /// parameters. + /// + /// # Syntax + /// + /// This function accepts its arguments as a `TokenStream`. The recommended way + /// to call this function is passing the result of invoking the `quote!` + /// macro to it. + /// + /// ```ignore + /// s.gen_impl(quote! { + /// // You can write any items which you want to import into scope here. + /// // For example, you may want to include an `extern crate` for the + /// // crate which implements your trait. These items will only be + /// // visible to the code you generate, and won't be exposed to the + /// // consuming crate + /// extern crate krate; + /// + /// // You can also add `use` statements here to bring types or traits + /// // into scope. + /// // + /// // WARNING: Try not to use common names here, because the stable + /// // version of syn does not support hygiene and you could accidentally + /// // shadow types from the caller crate. + /// use krate::Trait as MyTrait; + /// + /// // The actual impl block is a `gen impl` or `gen unsafe impl` block. + /// // You can use `@Self` to refer to the structure's type. + /// gen impl MyTrait for @Self { + /// fn f(&self) { ... } + /// } + /// }) + /// ``` + /// + /// The most common usage of this trait involves loading the crate the + /// target trait comes from with `extern crate`, and then invoking a `gen + /// impl` block. 
+ /// + /// # Hygiene + /// + /// This method tries to handle hygiene intelligently for both stable and + /// unstable proc-macro implementations, however there are visible + /// differences. + /// + /// The output of every `gen_impl` function is wrapped in a dummy `const` + /// value, to ensure that it is given its own scope, and any values brought + /// into scope are not leaked to the calling crate. + /// + /// By default, the above invocation may generate an output like the + /// following: + /// + /// ```ignore + /// const _: () = { + /// extern crate krate; + /// use krate::Trait as MyTrait; + /// impl MyTrait for Struct where T: MyTrait { + /// fn f(&self) { ... } + /// } + /// }; + /// ``` + /// + /// ### Using the `std` crate + /// + /// If you are using `quote!()` to implement your trait, with the + /// `proc-macro2/nightly` feature, `std` isn't considered to be in scope for + /// your macro. This means that if you use types from `std` in your + /// procedural macro, you'll want to explicitly load it with an `extern + /// crate std;`. + /// + /// ### Absolute paths + /// + /// You should generally avoid using absolute paths in your generated code, + /// as they will resolve very differently when using the stable and nightly + /// versions of `proc-macro2`. Instead, load the crates you need to use + /// explictly with `extern crate` and + /// + /// # Trait Bounds + /// + /// This method will automatically add trait bounds for any type parameters + /// which are referenced within the types of non-ignored fields. + /// + /// Additional type parameters may be added with the generics syntax after + /// the `impl` keyword. + /// + /// ### Type Macro Caveat + /// + /// If the method contains any macros in type position, all parameters will + /// be considered bound. This is because we cannot determine which type + /// parameters are bound by type macros. + /// + /// # Errors + /// + /// This function will generate a `compile_error!` if additional type + /// parameters added by `impl<..>` conflict with generic type parameters on + /// the original struct. + /// + /// # Panics + /// + /// This function will panic if the input `TokenStream` is not well-formed. + /// + /// # Example Usage + /// + /// ``` + /// # use synstructure::*; + /// let di: syn::DeriveInput = syn::parse_quote! { + /// enum A { + /// B(T), + /// C(Option), + /// } + /// }; + /// let mut s = Structure::new(&di); + /// + /// s.filter_variants(|v| v.ast().ident != "B"); + /// + /// assert_eq!( + /// s.gen_impl(quote! { + /// extern crate krate; + /// gen impl krate::Trait for @Self { + /// fn a() {} + /// } + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// impl krate::Trait for A + /// where + /// Option: krate::Trait, + /// U: krate::Trait + /// { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// + /// // NOTE: You can also add extra generics after the impl + /// assert_eq!( + /// s.gen_impl(quote! { + /// extern crate krate; + /// gen impl krate::Trait for @Self + /// where + /// X: Send + Sync, + /// { + /// fn a() {} + /// } + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// impl krate::Trait for A + /// where + /// X: Send + Sync, + /// Option: krate::Trait, + /// U: krate::Trait + /// { + /// fn a() {} + /// } + /// }; + /// }.to_string() + /// ); + /// + /// // NOTE: you can generate multiple traits with a single call + /// assert_eq!( + /// s.gen_impl(quote! 
{ + /// extern crate krate; + /// + /// gen impl krate::Trait for @Self { + /// fn a() {} + /// } + /// + /// gen impl krate::OtherTrait for @Self { + /// fn b() {} + /// } + /// }).to_string(), + /// quote!{ + /// const _: () = { + /// extern crate krate; + /// impl krate::Trait for A + /// where + /// Option: krate::Trait, + /// U: krate::Trait + /// { + /// fn a() {} + /// } + /// + /// impl krate::OtherTrait for A + /// where + /// Option: krate::OtherTrait, + /// U: krate::OtherTrait + /// { + /// fn b() {} + /// } + /// }; + /// }.to_string() + /// ); + /// ``` + /// + /// Use `add_bounds` to change which bounds are generated. + pub fn gen_impl(&self, cfg: TokenStream) -> TokenStream { + Parser::parse2( + |input: ParseStream<'_>| -> Result { self.gen_impl_parse(input, true) }, + cfg, + ) + .expect("Failed to parse gen_impl") + } + + fn gen_impl_parse(&self, input: ParseStream<'_>, wrap: bool) -> Result { + fn parse_prefix(input: ParseStream<'_>) -> Result> { + if input.parse::()? != "gen" { + return Err(input.error("Expected keyword `gen`")); + } + let safety = input.parse::>()?; + let _ = input.parse::()?; + Ok(safety) + } + + let mut before = vec![]; + loop { + if parse_prefix(&input.fork()).is_ok() { + break; + } + before.push(input.parse::()?); + } + + // Parse the prefix "for real" + let safety = parse_prefix(input)?; + + // optional `<>` + let mut generics = input.parse::()?; + + // @bound + let bound = input.parse::()?; + + // `for @Self` + let _ = input.parse::()?; + let _ = input.parse::()?; + let _ = input.parse::()?; + + // optional `where ...` + generics.where_clause = input.parse()?; + + // Body of the impl + let body; + braced!(body in input); + let body = body.parse::()?; + + // Try to parse the next entry in sequence. If this fails, we'll fall + // back to just parsing the entire rest of the TokenStream. + let maybe_next_impl = self.gen_impl_parse(&input.fork(), false); + + // Eat tokens to the end. Whether or not our speculative nested parse + // succeeded, we're going to want to consume the rest of our input. + let mut after = input.parse::()?; + if let Ok(stream) = maybe_next_impl { + after = stream; + } + assert!(input.is_empty(), "Should've consumed the rest of our input"); + + /* Codegen Logic */ + let name = &self.ast.ident; + + // Add the generics from the original struct in, and then add any + // additional trait bounds which we need on the type. + if let Err(err) = merge_generics(&mut generics, &self.ast.generics) { + // Report the merge error as a `compile_error!`, as it may be + // triggerable by an end-user. + return Ok(err.to_compile_error()); + } + + self.add_trait_bounds(&bound, &mut generics.where_clause, self.add_bounds); + let (impl_generics, _, where_clause) = generics.split_for_impl(); + let (_, ty_generics, _) = self.ast.generics.split_for_impl(); + + let generated = quote! { + #(#before)* + #safety impl #impl_generics #bound for #name #ty_generics #where_clause { + #body + } + #after + }; + + if wrap { + Ok(quote! { + const _: () = { #generated }; + }) + } else { + Ok(generated) + } + } +} + +/// Dumps an unpretty version of a tokenstream. Takes any type which implements +/// `Display`. +/// +/// This is mostly useful for visualizing the output of a procedural macro, as +/// it makes it marginally more readable. It is used in the implementation of +/// `test_derive!` to unprettily print the output. +/// +/// # Stability +/// +/// The stability of the output of this function is not guaranteed. 
Do not +/// assert that the output of this function does not change between minor +/// versions. +/// +/// # Example +/// +/// ``` +/// # use quote::quote; +/// assert_eq!( +/// synstructure::unpretty_print(quote! { +/// const _: () = { +/// extern crate krate; +/// impl krate::Trait for A +/// where +/// Option: krate::Trait, +/// U: krate::Trait +/// { +/// fn a() {} +/// } +/// }; +/// }), +/// "const _ : ( +/// ) +/// = { +/// extern crate krate ; +/// impl < T , U > krate :: Trait for A < T , U > where Option < U > : krate :: Trait , U : krate :: Trait { +/// fn a ( +/// ) +/// { +/// } +/// } +/// } +/// ; +/// " +/// ) +/// ``` +pub fn unpretty_print(ts: T) -> String { + let mut res = String::new(); + + let raw_s = ts.to_string(); + let mut s = &raw_s[..]; + let mut indent = 0; + while let Some(i) = s.find(&['(', '{', '[', ')', '}', ']', ';'][..]) { + match &s[i..=i] { + "(" | "{" | "[" => indent += 1, + ")" | "}" | "]" => indent -= 1, + _ => {} + } + res.push_str(&s[..=i]); + res.push('\n'); + for _ in 0..indent { + res.push_str(" "); + } + s = trim_start_matches(&s[i + 1..], ' '); + } + res.push_str(s); + res +} + +/// `trim_left_matches` has been deprecated in favor of `trim_start_matches`. +/// This helper silences the warning, as we need to continue using +/// `trim_left_matches` for rust 1.15 support. +#[allow(deprecated)] +fn trim_start_matches(s: &str, c: char) -> &str { + s.trim_left_matches(c) +} + +/// Helper trait describing values which may be returned by macro implementation +/// methods used by this crate's macros. +pub trait MacroResult { + /// Convert this result into a `Result` for further processing / validation. + fn into_result(self) -> Result; + + /// Convert this result into a `proc_macro::TokenStream`, ready to return + /// from a native `proc_macro` implementation. + /// + /// If `into_result()` would return an `Err`, this method should instead + /// generate a `compile_error!` invocation to nicely report the error. + /// + /// *This method is available if `synstructure` is built with the + /// `"proc-macro"` feature.* + #[cfg(all( + not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), + feature = "proc-macro" + ))] + fn into_stream(self) -> proc_macro::TokenStream + where + Self: Sized, + { + match self.into_result() { + Ok(ts) => ts.into(), + Err(err) => err.to_compile_error().into(), + } + } +} + +#[cfg(all( + not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), + feature = "proc-macro" +))] +impl MacroResult for proc_macro::TokenStream { + fn into_result(self) -> Result { + Ok(self.into()) + } + + fn into_stream(self) -> proc_macro::TokenStream { + self + } +} + +impl MacroResult for TokenStream { + fn into_result(self) -> Result { + Ok(self) + } +} + +impl MacroResult for Result { + fn into_result(self) -> Result { + match self { + Ok(v) => v.into_result(), + Err(err) => Err(err), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // Regression test for #48 + #[test] + fn test_each_enum() { + let di: syn::DeriveInput = syn::parse_quote! { + enum A { + Foo(usize, bool), + Bar(bool, usize), + Baz(usize, bool, usize), + Quux(bool, usize, bool) + } + }; + let mut s = Structure::new(&di); + + s.filter(|bi| bi.ast().ty.to_token_stream().to_string() == "bool"); + + assert_eq!( + s.each(|bi| quote!(do_something(#bi))).to_string(), + quote! { + A::Foo(_, ref __binding_1,) => { { do_something(__binding_1) } } + A::Bar(ref __binding_0, ..) 
=> { { do_something(__binding_0) } } + A::Baz(_, ref __binding_1, ..) => { { do_something(__binding_1) } } + A::Quux(ref __binding_0, _, ref __binding_2,) => { + { + do_something(__binding_0) + } + { + do_something(__binding_2) + } + } + } + .to_string() + ); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/src/macros.rs b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/src/macros.rs new file mode 100644 index 000000000000..dc17ce3fdb3d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/synstructure-0.13.1/src/macros.rs @@ -0,0 +1,262 @@ +//! This module provides two utility macros for testing custom derives. They can +//! be used together to eliminate some of the boilerplate required in order to +//! declare and test custom derive implementations. + +// Re-exports used by the decl_derive! and test_derive! +pub use proc_macro2::TokenStream as TokenStream2; +pub use syn::{parse2, parse_str, DeriveInput}; +pub use quote::quote; + +#[cfg(all( + not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), + feature = "proc-macro" +))] +pub use proc_macro::TokenStream; +#[cfg(all( + not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), + feature = "proc-macro" +))] +pub use syn::parse; + +/// The `decl_derive!` macro declares a custom derive wrapper. It will parse the +/// incoming `TokenStream` into a `synstructure::Structure` object, and pass it +/// into the inner function. +/// +/// Your inner function should take a `synstructure::Structure` by value, and +/// return a type implementing `synstructure::MacroResult`, for example: +/// +/// ``` +/// fn derive_simple(input: synstructure::Structure) -> proc_macro2::TokenStream { +/// unimplemented!() +/// } +/// +/// fn derive_result(input: synstructure::Structure) +/// -> syn::Result +/// { +/// unimplemented!() +/// } +/// ``` +/// +/// # Usage +/// +/// ### Without Attributes +/// ``` +/// fn derive_interesting(_input: synstructure::Structure) -> proc_macro2::TokenStream { +/// quote::quote! { ... } +/// } +/// +/// # const _IGNORE: &'static str = stringify! { +/// decl_derive!([Interesting] => derive_interesting); +/// # }; +/// ``` +/// +/// ### With Attributes +/// ``` +/// # fn main() {} +/// fn derive_interesting(_input: synstructure::Structure) -> proc_macro2::TokenStream { +/// quote::quote! { ... } +/// } +/// +/// # const _IGNORE: &'static str = stringify! { +/// decl_derive!([Interesting, attributes(interesting_ignore)] => derive_interesting); +/// # }; +/// ``` +/// +/// ### Decl Attributes & Doc Comments +/// ``` +/// # fn main() {} +/// fn derive_interesting(_input: synstructure::Structure) -> proc_macro2::TokenStream { +/// quote::quote! { ... } +/// } +/// +/// # const _IGNORE: &'static str = stringify! { +/// decl_derive! { +/// [Interesting] => +/// #[allow(some_lint)] +/// /// Documentation Comments +/// derive_interesting +/// } +/// # }; +/// ``` +/// +/// *This macro is available if `synstructure` is built with the `"proc-macro"` +/// feature.* +#[cfg(all( + not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), + feature = "proc-macro" +))] +#[macro_export] +macro_rules! decl_derive { + // XXX: Switch to using this variant everywhere? 
+ ([$derives:ident $($derive_t:tt)*] => $(#[$($attrs:tt)*])* $inner:path) => { + #[proc_macro_derive($derives $($derive_t)*)] + #[allow(non_snake_case)] + $(#[$($attrs)*])* + pub fn $derives( + i: $crate::macros::TokenStream + ) -> $crate::macros::TokenStream { + match $crate::macros::parse::<$crate::macros::DeriveInput>(i) { + ::core::result::Result::Ok(p) => { + match $crate::Structure::try_new(&p) { + ::core::result::Result::Ok(s) => $crate::MacroResult::into_stream($inner(s)), + ::core::result::Result::Err(e) => { + ::core::convert::Into::into(e.to_compile_error()) + } + } + } + ::core::result::Result::Err(e) => { + ::core::convert::Into::into(e.to_compile_error()) + } + } + } + }; +} + +/// The `decl_attribute!` macro declares a custom attribute wrapper. It will +/// parse the incoming `TokenStream` into a `synstructure::Structure` object, +/// and pass it into the inner function. +/// +/// Your inner function should have the following type: +/// +/// ``` +/// fn attribute( +/// attr: proc_macro2::TokenStream, +/// structure: synstructure::Structure, +/// ) -> proc_macro2::TokenStream { +/// unimplemented!() +/// } +/// ``` +/// +/// # Usage +/// +/// ``` +/// fn attribute_interesting( +/// _attr: proc_macro2::TokenStream, +/// _structure: synstructure::Structure, +/// ) -> proc_macro2::TokenStream { +/// quote::quote! { ... } +/// } +/// +/// # const _IGNORE: &'static str = stringify! { +/// decl_attribute!([interesting] => attribute_interesting); +/// # }; +/// ``` +/// +/// *This macro is available if `synstructure` is built with the `"proc-macro"` +/// feature.* +#[cfg(all( + not(all(target_arch = "wasm32", any(target_os = "unknown", target_os = "wasi"))), + feature = "proc-macro" +))] +#[macro_export] +macro_rules! decl_attribute { + ([$attribute:ident] => $(#[$($attrs:tt)*])* $inner:path) => { + #[proc_macro_attribute] + $(#[$($attrs)*])* + pub fn $attribute( + attr: $crate::macros::TokenStream, + i: $crate::macros::TokenStream, + ) -> $crate::macros::TokenStream { + match $crate::macros::parse::<$crate::macros::DeriveInput>(i) { + ::core::result::Result::Ok(p) => match $crate::Structure::try_new(&p) { + ::core::result::Result::Ok(s) => { + $crate::MacroResult::into_stream( + $inner(::core::convert::Into::into(attr), s) + ) + } + ::core::result::Result::Err(e) => { + ::core::convert::Into::into(e.to_compile_error()) + } + }, + ::core::result::Result::Err(e) => { + ::core::convert::Into::into(e.to_compile_error()) + } + } + } + }; +} + +/// Run a test on a custom derive. This macro expands both the original struct +/// and the expansion to ensure that they compile correctly, and confirms that +/// feeding the original struct into the named derive will produce the written +/// output. +/// +/// You can add `no_build` to the end of the macro invocation to disable +/// checking that the written code compiles. This is useful in contexts where +/// the procedural macro cannot depend on the crate where it is used during +/// tests. +/// +/// # Usage +/// +/// ``` +/// fn test_derive_example(_s: synstructure::Structure) +/// -> Result +/// { +/// Ok(quote::quote! { const YOUR_OUTPUT: &'static str = "here"; }) +/// } +/// +/// fn main() { +/// synstructure::test_derive!{ +/// test_derive_example { +/// struct A; +/// } +/// expands to { +/// const YOUR_OUTPUT: &'static str = "here"; +/// } +/// } +/// } +/// ``` +#[macro_export] +macro_rules! 
test_derive { + ($name:path { $($i:tt)* } expands to { $($o:tt)* }) => { + { + #[allow(dead_code)] + fn ensure_compiles() { + $($i)* + $($o)* + } + + $crate::test_derive!($name { $($i)* } expands to { $($o)* } no_build); + } + }; + + ($name:path { $($i:tt)* } expands to { $($o:tt)* } no_build) => { + { + let i = $crate::macros::quote!( $($i)* ); + let parsed = $crate::macros::parse2::<$crate::macros::DeriveInput>(i) + .expect(::core::concat!( + "Failed to parse input to `#[derive(", + ::core::stringify!($name), + ")]`", + )); + + let raw_res = $name($crate::Structure::new(&parsed)); + let res = $crate::MacroResult::into_result(raw_res) + .expect(::core::concat!( + "Procedural macro failed for `#[derive(", + ::core::stringify!($name), + ")]`", + )); + + let expected_toks = $crate::macros::quote!( $($o)* ); + if <$crate::macros::TokenStream2 as ::std::string::ToString>::to_string(&res) + != <$crate::macros::TokenStream2 as ::std::string::ToString>::to_string(&expected_toks) + { + panic!("\ +test_derive failed: +expected: +``` +{} +``` + +got: +``` +{} +```\n", + $crate::unpretty_print(&expected_toks), + $crate::unpretty_print(&res), + ); + } + } + }; +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.cargo_vcs_info.json deleted file mode 100644 index 302d1414182e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "74bfe75eb25ba9d39b0ae5b570d611855cbc5086" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.clippy.toml b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.clippy.toml deleted file mode 100644 index 3d30690f1204..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.clippy.toml +++ /dev/null @@ -1 +0,0 @@ -msrv = "1.31.0" diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.github/workflows/ci.yml b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.github/workflows/ci.yml deleted file mode 100644 index e1db2edb7b6b..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/.github/workflows/ci.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: CI - -on: - push: - pull_request: - schedule: [cron: "40 1 * * *"] - -permissions: - contents: read - -env: - RUSTFLAGS: -Dwarnings - -jobs: - test: - name: Rust ${{matrix.rust}} - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - rust: [beta, stable, 1.56.0] - include: - - rust: nightly - rustflags: --cfg thiserror_nightly_testing - timeout-minutes: 45 - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{matrix.rust}} - components: rust-src - - run: cargo test --all - env: - RUSTFLAGS: ${{matrix.rustflags}} ${{env.RUSTFLAGS}} - - msrv: - name: Rust 1.31.0 - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@1.31.0 - with: - components: rust-src - - run: cargo check - - clippy: - name: Clippy - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@nightly - with: - components: clippy, rust-src - - run: cargo clippy --tests -- -Dclippy::all -Dclippy::pedantic - - miri: - name: Miri - runs-on: ubuntu-latest - timeout-minutes: 45 - steps: - - 
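As a minimal sketch of how a proc-macro crate might wire a custom derive through the `decl_derive!` macro documented above (modeled on synstructure's own `HeapSize` documentation example; the `HeapSize` trait and `heap_size_derive` function are illustrative assumptions, and the crate would need `proc-macro = true` plus `quote` and `proc-macro2` dependencies):

```rust
// Sketch only: lives at the root of a crate built with `proc-macro = true`.
// `HeapSize` is a hypothetical trait, not something defined in this patch.
use quote::quote;
use synstructure::decl_derive;

fn heap_size_derive(s: synstructure::Structure) -> proc_macro2::TokenStream {
    // One match arm per variant, summing a hypothetical `heap_size()` over
    // every field binding (`#bi` expands to `__binding_N` as in the test above).
    let body = s.each(|bi| quote! { size += HeapSize::heap_size(#bi); });

    s.gen_impl(quote! {
        gen impl HeapSize for @Self {
            fn heap_size(&self) -> usize {
                let mut size = 0;
                match *self { #body }
                size
            }
        }
    })
}

// Expands to the `#[proc_macro_derive(HeapSize)]` entry point that
// `decl_derive!` documents above.
decl_derive!([HeapSize] => heap_size_derive);
```

`test_derive!` can then be used in the same crate's tests to assert the exact token stream that `heap_size_derive` expands to for a given input type, as its doc comment shows.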
uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@miri - - run: cargo miri test - env: - MIRIFLAGS: -Zmiri-strict-provenance - - outdated: - name: Outdated - runs-on: ubuntu-latest - if: github.event_name != 'pull_request' - timeout-minutes: 45 - steps: - - uses: actions/checkout@v3 - - uses: dtolnay/install@cargo-outdated - - run: cargo outdated --workspace --exit-code 1 diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/Cargo.toml deleted file mode 100644 index e235408d809b..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -rust-version = "1.31" -name = "thiserror" -version = "1.0.38" -authors = ["David Tolnay "] -description = "derive(Error)" -documentation = "https://docs.rs/thiserror" -readme = "README.md" -keywords = [ - "error", - "error-handling", - "derive", -] -categories = ["rust-patterns"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/dtolnay/thiserror" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies.thiserror-impl] -version = "=1.0.38" - -[dev-dependencies.anyhow] -version = "1.0.65" - -[dev-dependencies.ref-cast] -version = "1.0" - -[dev-dependencies.rustversion] -version = "1.0" - -[dev-dependencies.trybuild] -version = "1.0.66" -features = ["diff"] diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/Cargo.toml.orig deleted file mode 100644 index 82ef03d8b7e8..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/Cargo.toml.orig +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "thiserror" -version = "1.0.38" -authors = ["David Tolnay "] -categories = ["rust-patterns"] -description = "derive(Error)" -documentation = "https://docs.rs/thiserror" -edition = "2018" -keywords = ["error", "error-handling", "derive"] -license = "MIT OR Apache-2.0" -repository = "https://github.com/dtolnay/thiserror" -rust-version = "1.31" - -[dependencies] -thiserror-impl = { version = "=1.0.38", path = "impl" } - -[dev-dependencies] -anyhow = "1.0.65" -ref-cast = "1.0" -rustversion = "1.0" -trybuild = { version = "1.0.66", features = ["diff"] } - -[workspace] -members = ["impl"] - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/README.md b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/README.md deleted file mode 100644 index 3ba375ff3f96..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/README.md +++ /dev/null @@ -1,222 +0,0 @@ -derive(Error) -============= - -[github](https://github.com/dtolnay/thiserror) -[crates.io](https://crates.io/crates/thiserror) -[docs.rs](https://docs.rs/thiserror) -[build status](https://github.com/dtolnay/thiserror/actions?query=branch%3Amaster) - -This library provides a convenient 
derive macro for the standard library's -[`std::error::Error`] trait. - -[`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html - -```toml -[dependencies] -thiserror = "1.0" -``` - -*Compiler support: requires rustc 1.31+* - -<br>
- -## Example - -```rust -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum DataStoreError { - #[error("data store disconnected")] - Disconnect(#[from] io::Error), - #[error("the data for key `{0}` is not available")] - Redaction(String), - #[error("invalid header (expected {expected:?}, found {found:?})")] - InvalidHeader { - expected: String, - found: String, - }, - #[error("unknown data store error")] - Unknown, -} -``` - -
- -## Details - -- Thiserror deliberately does not appear in your public API. You get the same - thing as if you had written an implementation of `std::error::Error` by hand, - and switching from handwritten impls to thiserror or vice versa is not a - breaking change. - -- Errors may be enums, structs with named fields, tuple structs, or unit - structs. - -- A `Display` impl is generated for your error if you provide `#[error("...")]` - messages on the struct or each variant of your enum, as shown above in the - example. - - The messages support a shorthand for interpolating fields from the error. - - - `#[error("{var}")]` ⟶ `write!("{}", self.var)` - - `#[error("{0}")]` ⟶ `write!("{}", self.0)` - - `#[error("{var:?}")]` ⟶ `write!("{:?}", self.var)` - - `#[error("{0:?}")]` ⟶ `write!("{:?}", self.0)` - - These shorthands can be used together with any additional format args, which - may be arbitrary expressions. For example: - - ```rust - #[derive(Error, Debug)] - pub enum Error { - #[error("invalid rdo_lookahead_frames {0} (expected < {})", i32::MAX)] - InvalidLookahead(u32), - } - ``` - - If one of the additional expression arguments needs to refer to a field of the - struct or enum, then refer to named fields as `.var` and tuple fields as `.0`. - - ```rust - #[derive(Error, Debug)] - pub enum Error { - #[error("first letter must be lowercase but was {:?}", first_char(.0))] - WrongCase(String), - #[error("invalid index {idx}, expected at least {} and at most {}", .limits.lo, .limits.hi)] - OutOfBounds { idx: usize, limits: Limits }, - } - ``` - -- A `From` impl is generated for each variant containing a `#[from]` attribute. - - Note that the variant must not contain any other fields beyond the source - error and possibly a backtrace. A backtrace is captured from within the `From` - impl if there is a field for it. - - ```rust - #[derive(Error, Debug)] - pub enum MyError { - Io { - #[from] - source: io::Error, - backtrace: Backtrace, - }, - } - ``` - -- The Error trait's `source()` method is implemented to return whichever field - has a `#[source]` attribute or is named `source`, if any. This is for - identifying the underlying lower level error that caused your error. - - The `#[from]` attribute always implies that the same field is `#[source]`, so - you don't ever need to specify both attributes. - - Any error type that implements `std::error::Error` or dereferences to `dyn - std::error::Error` will work as a source. - - ```rust - #[derive(Error, Debug)] - pub struct MyError { - msg: String, - #[source] // optional if field name is `source` - source: anyhow::Error, - } - ``` - -- The Error trait's `provide()` method is implemented to provide whichever field - has a type named `Backtrace`, if any, as a `std::backtrace::Backtrace`. - - ```rust - use std::backtrace::Backtrace; - - #[derive(Error, Debug)] - pub struct MyError { - msg: String, - backtrace: Backtrace, // automatically detected - } - ``` - -- If a field is both a source (named `source`, or has `#[source]` or `#[from]` - attribute) *and* is marked `#[backtrace]`, then the Error trait's `provide()` - method is forwarded to the source's `provide` so that both layers of the error - share the same backtrace. - - ```rust - #[derive(Error, Debug)] - pub enum MyError { - Io { - #[backtrace] - source: io::Error, - }, - } - ``` - -- Errors may use `error(transparent)` to forward the source and Display methods - straight through to an underlying error without adding an additional message. 
- This would be appropriate for enums that need an "anything else" variant. - - ```rust - #[derive(Error, Debug)] - pub enum MyError { - ... - - #[error(transparent)] - Other(#[from] anyhow::Error), // source and Display delegate to anyhow::Error - } - ``` - - Another use case is hiding implementation details of an error representation - behind an opaque error type, so that the representation is able to evolve - without breaking the crate's public API. - - ```rust - // PublicError is public, but opaque and easy to keep compatible. - #[derive(Error, Debug)] - #[error(transparent)] - pub struct PublicError(#[from] ErrorRepr); - - impl PublicError { - // Accessors for anything we do want to expose publicly. - } - - // Private and free to change across minor version of the crate. - #[derive(Error, Debug)] - enum ErrorRepr { - ... - } - ``` - -- See also the [`anyhow`] library for a convenient single error type to use in - application code. - - [`anyhow`]: https://github.com/dtolnay/anyhow - -
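As a minimal sketch of the `#[from]` conversion described above, assuming a hypothetical `ConfigError` type that is not part of this patch, the generated `From<io::Error>` impl is what lets `?` convert the error automatically:

```rust
// Sketch only: `ConfigError` and `read_config` are illustrative names.
use std::{fs, io};
use thiserror::Error;

#[derive(Error, Debug)]
pub enum ConfigError {
    // #[from] generates `impl From<io::Error> for ConfigError` and also
    // marks this field as the #[source] of the error.
    #[error("failed to read configuration file")]
    Io(#[from] io::Error),
    #[error("configuration file is empty")]
    Empty,
}

fn read_config(path: &str) -> Result<String, ConfigError> {
    let text = fs::read_to_string(path)?; // io::Error -> ConfigError::Io via #[from]
    if text.trim().is_empty() {
        return Err(ConfigError::Empty);
    }
    Ok(text)
}

fn main() {
    match read_config("/no/such/file") {
        Ok(text) => println!("read {} bytes of config", text.len()),
        // Display comes from the #[error("...")] attribute on the variant.
        Err(err) => eprintln!("error: {err}"),
    }
}
```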
- -## Comparison to anyhow - -Use thiserror if you care about designing your own dedicated error type(s) so -that the caller receives exactly the information that you choose in the event of -failure. This most often applies to library-like code. Use [Anyhow] if you don't -care what error type your functions return, you just want it to be easy. This is -common in application-like code. - -[Anyhow]: https://github.com/dtolnay/anyhow - -
- -#### License - -<sup> -Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option. -</sup> - -<br>
- - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. - diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/build.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/build.rs deleted file mode 100644 index 004dfb015974..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/build.rs +++ /dev/null @@ -1,66 +0,0 @@ -use std::env; -use std::fs; -use std::path::Path; -use std::process::{Command, ExitStatus, Stdio}; -use std::str; - -// This code exercises the surface area that we expect of the Provider API. If -// the current toolchain is able to compile it, then thiserror is able to use -// providers for backtrace support. -const PROBE: &str = r#" - #![feature(provide_any)] - - use std::any::{Demand, Provider}; - - fn _f<'a, P: Provider>(p: &'a P, demand: &mut Demand<'a>) { - p.provide(demand); - } -"#; - -fn main() { - match compile_probe() { - Some(status) if status.success() => println!("cargo:rustc-cfg=provide_any"), - _ => {} - } -} - -fn compile_probe() -> Option { - let rustc = env::var_os("RUSTC")?; - let out_dir = env::var_os("OUT_DIR")?; - let probefile = Path::new(&out_dir).join("probe.rs"); - fs::write(&probefile, PROBE).ok()?; - - // Make sure to pick up Cargo rustc configuration. - let mut cmd = if let Some(wrapper) = env::var_os("RUSTC_WRAPPER") { - let mut cmd = Command::new(wrapper); - // The wrapper's first argument is supposed to be the path to rustc. - cmd.arg(rustc); - cmd - } else { - Command::new(rustc) - }; - - cmd.stderr(Stdio::null()) - .arg("--edition=2018") - .arg("--crate-name=thiserror_build") - .arg("--crate-type=lib") - .arg("--emit=metadata") - .arg("--out-dir") - .arg(out_dir) - .arg(probefile); - - if let Some(target) = env::var_os("TARGET") { - cmd.arg("--target").arg(target); - } - - // If Cargo wants to set RUSTFLAGS, use that. - if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") { - if !rustflags.is_empty() { - for arg in rustflags.split('\x1f') { - cmd.arg(arg); - } - } - } - - cmd.status().ok() -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/display.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/display.rs deleted file mode 100644 index 0eb0dd9e201c..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/display.rs +++ /dev/null @@ -1,28 +0,0 @@ -use std::fmt::Display; -use std::path::{self, Path, PathBuf}; - -pub trait DisplayAsDisplay { - fn as_display(&self) -> Self; -} - -impl DisplayAsDisplay for &T { - fn as_display(&self) -> Self { - self - } -} - -pub trait PathAsDisplay { - fn as_display(&self) -> path::Display<'_>; -} - -impl PathAsDisplay for Path { - fn as_display(&self) -> path::Display<'_> { - self.display() - } -} - -impl PathAsDisplay for PathBuf { - fn as_display(&self) -> path::Display<'_> { - self.display() - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/lib.rs deleted file mode 100644 index aae6552d091d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/lib.rs +++ /dev/null @@ -1,254 +0,0 @@ -//! [![github]](https://github.com/dtolnay/thiserror) [![crates-io]](https://crates.io/crates/thiserror) [![docs-rs]](https://docs.rs/thiserror) -//! -//! 
[github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github -//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust -//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs -//! -//!
-//! -//! This library provides a convenient derive macro for the standard library's -//! [`std::error::Error`] trait. -//! -//! [`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html -//! -//! <br>
-//! -//! # Example -//! -//! ```rust -//! # use std::io; -//! use thiserror::Error; -//! -//! #[derive(Error, Debug)] -//! pub enum DataStoreError { -//! #[error("data store disconnected")] -//! Disconnect(#[from] io::Error), -//! #[error("the data for key `{0}` is not available")] -//! Redaction(String), -//! #[error("invalid header (expected {expected:?}, found {found:?})")] -//! InvalidHeader { -//! expected: String, -//! found: String, -//! }, -//! #[error("unknown data store error")] -//! Unknown, -//! } -//! ``` -//! -//!
-//! -//! # Details -//! -//! - Thiserror deliberately does not appear in your public API. You get the -//! same thing as if you had written an implementation of `std::error::Error` -//! by hand, and switching from handwritten impls to thiserror or vice versa -//! is not a breaking change. -//! -//! - Errors may be enums, structs with named fields, tuple structs, or unit -//! structs. -//! -//! - A `Display` impl is generated for your error if you provide -//! `#[error("...")]` messages on the struct or each variant of your enum, as -//! shown above in the example. -//! -//! The messages support a shorthand for interpolating fields from the error. -//! -//! - `#[error("{var}")]` ⟶ `write!("{}", self.var)` -//! - `#[error("{0}")]` ⟶ `write!("{}", self.0)` -//! - `#[error("{var:?}")]` ⟶ `write!("{:?}", self.var)` -//! - `#[error("{0:?}")]` ⟶ `write!("{:?}", self.0)` -//! -//! These shorthands can be used together with any additional format args, -//! which may be arbitrary expressions. For example: -//! -//! ```rust -//! # use std::i32; -//! # use thiserror::Error; -//! # -//! #[derive(Error, Debug)] -//! pub enum Error { -//! #[error("invalid rdo_lookahead_frames {0} (expected < {})", i32::MAX)] -//! InvalidLookahead(u32), -//! } -//! ``` -//! -//! If one of the additional expression arguments needs to refer to a field of -//! the struct or enum, then refer to named fields as `.var` and tuple fields -//! as `.0`. -//! -//! ```rust -//! # use thiserror::Error; -//! # -//! # fn first_char(s: &String) -> char { -//! # s.chars().next().unwrap() -//! # } -//! # -//! # #[derive(Debug)] -//! # struct Limits { -//! # lo: usize, -//! # hi: usize, -//! # } -//! # -//! #[derive(Error, Debug)] -//! pub enum Error { -//! #[error("first letter must be lowercase but was {:?}", first_char(.0))] -//! WrongCase(String), -//! #[error("invalid index {idx}, expected at least {} and at most {}", .limits.lo, .limits.hi)] -//! OutOfBounds { idx: usize, limits: Limits }, -//! } -//! ``` -//! -//! - A `From` impl is generated for each variant containing a `#[from]` -//! attribute. -//! -//! Note that the variant must not contain any other fields beyond the source -//! error and possibly a backtrace. A backtrace is captured from within the -//! `From` impl if there is a field for it. -//! -//! ```rust -//! # const IGNORE: &str = stringify! { -//! #[derive(Error, Debug)] -//! pub enum MyError { -//! Io { -//! #[from] -//! source: io::Error, -//! backtrace: Backtrace, -//! }, -//! } -//! # }; -//! ``` -//! -//! - The Error trait's `source()` method is implemented to return whichever -//! field has a `#[source]` attribute or is named `source`, if any. This is -//! for identifying the underlying lower level error that caused your error. -//! -//! The `#[from]` attribute always implies that the same field is `#[source]`, -//! so you don't ever need to specify both attributes. -//! -//! Any error type that implements `std::error::Error` or dereferences to `dyn -//! std::error::Error` will work as a source. -//! -//! ```rust -//! # use std::fmt::{self, Display}; -//! # use thiserror::Error; -//! # -//! #[derive(Error, Debug)] -//! pub struct MyError { -//! msg: String, -//! #[source] // optional if field name is `source` -//! source: anyhow::Error, -//! } -//! # -//! # impl Display for MyError { -//! # fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { -//! # unimplemented!() -//! # } -//! # } -//! ``` -//! -//! - The Error trait's `provide()` method is implemented to provide whichever -//! 
field has a type named `Backtrace`, if any, as a -//! `std::backtrace::Backtrace`. -//! -//! ```rust -//! # const IGNORE: &str = stringify! { -//! use std::backtrace::Backtrace; -//! -//! #[derive(Error, Debug)] -//! pub struct MyError { -//! msg: String, -//! backtrace: Backtrace, // automatically detected -//! } -//! # }; -//! ``` -//! -//! - If a field is both a source (named `source`, or has `#[source]` or -//! `#[from]` attribute) *and* is marked `#[backtrace]`, then the Error -//! trait's `provide()` method is forwarded to the source's `provide` so that -//! both layers of the error share the same backtrace. -//! -//! ```rust -//! # const IGNORE: &str = stringify! { -//! #[derive(Error, Debug)] -//! pub enum MyError { -//! Io { -//! #[backtrace] -//! source: io::Error, -//! }, -//! } -//! # }; -//! ``` -//! -//! - Errors may use `error(transparent)` to forward the source and Display -//! methods straight through to an underlying error without adding an -//! additional message. This would be appropriate for enums that need an -//! "anything else" variant. -//! -//! ``` -//! # use thiserror::Error; -//! # -//! #[derive(Error, Debug)] -//! pub enum MyError { -//! # /* -//! ... -//! # */ -//! -//! #[error(transparent)] -//! Other(#[from] anyhow::Error), // source and Display delegate to anyhow::Error -//! } -//! ``` -//! -//! Another use case is hiding implementation details of an error -//! representation behind an opaque error type, so that the representation is -//! able to evolve without breaking the crate's public API. -//! -//! ``` -//! # use thiserror::Error; -//! # -//! // PublicError is public, but opaque and easy to keep compatible. -//! #[derive(Error, Debug)] -//! #[error(transparent)] -//! pub struct PublicError(#[from] ErrorRepr); -//! -//! impl PublicError { -//! // Accessors for anything we do want to expose publicly. -//! } -//! -//! // Private and free to change across minor version of the crate. -//! #[derive(Error, Debug)] -//! enum ErrorRepr { -//! # /* -//! ... -//! # */ -//! } -//! ``` -//! -//! - See also the [`anyhow`] library for a convenient single error type to use -//! in application code. -//! -//! [`anyhow`]: https://github.com/dtolnay/anyhow - -#![allow( - // Clippy bug: https://github.com/rust-lang/rust-clippy/issues/7421 - clippy::doc_markdown, - clippy::module_name_repetitions, - clippy::return_self_not_must_use, - clippy::wildcard_imports, -)] -#![cfg_attr(provide_any, feature(provide_any))] - -mod aserror; -mod display; -#[cfg(provide_any)] -mod provide; - -pub use thiserror_impl::*; - -// Not public API. 
-#[doc(hidden)] -pub mod __private { - pub use crate::aserror::AsDynError; - pub use crate::display::{DisplayAsDisplay, PathAsDisplay}; - #[cfg(provide_any)] - pub use crate::provide::ThiserrorProvide; -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/provide.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/provide.rs deleted file mode 100644 index 524e7435d784..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/provide.rs +++ /dev/null @@ -1,15 +0,0 @@ -use std::any::{Demand, Provider}; - -pub trait ThiserrorProvide: Sealed { - fn thiserror_provide<'a>(&'a self, demand: &mut Demand<'a>); -} - -impl ThiserrorProvide for T { - #[inline] - fn thiserror_provide<'a>(&'a self, demand: &mut Demand<'a>) { - self.provide(demand); - } -} - -pub trait Sealed {} -impl Sealed for T {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/compiletest.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/compiletest.rs deleted file mode 100644 index 7974a6249e43..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/compiletest.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[rustversion::attr(not(nightly), ignore)] -#[cfg_attr(miri, ignore)] -#[test] -fn ui() { - let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/*.rs"); -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_backtrace.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_backtrace.rs deleted file mode 100644 index 43f68b8b7e54..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_backtrace.rs +++ /dev/null @@ -1,297 +0,0 @@ -#![cfg_attr( - thiserror_nightly_testing, - feature(error_generic_member_access, provide_any) -)] - -use thiserror::Error; - -#[derive(Error, Debug)] -#[error("...")] -pub struct Inner; - -#[cfg(thiserror_nightly_testing)] -#[derive(Error, Debug)] -#[error("...")] -pub struct InnerBacktrace { - backtrace: std::backtrace::Backtrace, -} - -#[cfg(thiserror_nightly_testing)] -pub mod structs { - use super::{Inner, InnerBacktrace}; - use std::any; - use std::backtrace::Backtrace; - use std::error::Error; - use std::sync::Arc; - use thiserror::Error; - - #[derive(Error, Debug)] - #[error("...")] - pub struct PlainBacktrace { - backtrace: Backtrace, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct ExplicitBacktrace { - #[backtrace] - backtrace: Backtrace, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct OptBacktrace { - #[backtrace] - backtrace: Option, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct ArcBacktrace { - #[backtrace] - backtrace: Arc, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct BacktraceFrom { - #[from] - source: Inner, - #[backtrace] - backtrace: Backtrace, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct CombinedBacktraceFrom { - #[from] - #[backtrace] - source: InnerBacktrace, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct OptBacktraceFrom { - #[from] - source: Inner, - #[backtrace] - backtrace: Option, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct ArcBacktraceFrom { - #[from] - source: Inner, - #[backtrace] - backtrace: Arc, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct AnyhowBacktrace { - #[backtrace] - source: anyhow::Error, - } - - #[derive(Error, Debug)] - #[error("...")] - pub struct BoxDynErrorBacktrace { - #[backtrace] - source: Box, 
- } - - #[test] - fn test_backtrace() { - let error = PlainBacktrace { - backtrace: Backtrace::capture(), - }; - assert!(any::request_ref::(&error).is_some()); - - let error = ExplicitBacktrace { - backtrace: Backtrace::capture(), - }; - assert!(any::request_ref::(&error).is_some()); - - let error = OptBacktrace { - backtrace: Some(Backtrace::capture()), - }; - assert!(any::request_ref::(&error).is_some()); - - let error = ArcBacktrace { - backtrace: Arc::new(Backtrace::capture()), - }; - assert!(any::request_ref::(&error).is_some()); - - let error = BacktraceFrom::from(Inner); - assert!(any::request_ref::(&error).is_some()); - - let error = CombinedBacktraceFrom::from(InnerBacktrace { - backtrace: Backtrace::capture(), - }); - assert!(any::request_ref::(&error).is_some()); - - let error = OptBacktraceFrom::from(Inner); - assert!(any::request_ref::(&error).is_some()); - - let error = ArcBacktraceFrom::from(Inner); - assert!(any::request_ref::(&error).is_some()); - - let error = AnyhowBacktrace { - source: anyhow::Error::msg("..."), - }; - assert!(any::request_ref::(&error).is_some()); - - let error = BoxDynErrorBacktrace { - source: Box::new(PlainBacktrace { - backtrace: Backtrace::capture(), - }), - }; - assert!(any::request_ref::(&error).is_some()); - } - - // https://github.com/dtolnay/thiserror/issues/185 -- std::error::Error and - // std::any::Provide both have a method called 'provide', so directly - // calling it from generated code could be ambiguous. - #[test] - fn test_provide_name_collision() { - use std::any::Provider; - - #[derive(Error, Debug)] - #[error("...")] - struct MyError { - #[source] - #[backtrace] - x: std::io::Error, - } - - let _: dyn Error; - let _: dyn Provider; - } -} - -#[cfg(thiserror_nightly_testing)] -pub mod enums { - use super::{Inner, InnerBacktrace}; - use std::any; - use std::backtrace::Backtrace; - use std::sync::Arc; - use thiserror::Error; - - #[derive(Error, Debug)] - pub enum PlainBacktrace { - #[error("...")] - Test { backtrace: Backtrace }, - } - - #[derive(Error, Debug)] - pub enum ExplicitBacktrace { - #[error("...")] - Test { - #[backtrace] - backtrace: Backtrace, - }, - } - - #[derive(Error, Debug)] - pub enum OptBacktrace { - #[error("...")] - Test { - #[backtrace] - backtrace: Option, - }, - } - - #[derive(Error, Debug)] - pub enum ArcBacktrace { - #[error("...")] - Test { - #[backtrace] - backtrace: Arc, - }, - } - - #[derive(Error, Debug)] - pub enum BacktraceFrom { - #[error("...")] - Test { - #[from] - source: Inner, - #[backtrace] - backtrace: Backtrace, - }, - } - - #[derive(Error, Debug)] - pub enum CombinedBacktraceFrom { - #[error("...")] - Test { - #[from] - #[backtrace] - source: InnerBacktrace, - }, - } - - #[derive(Error, Debug)] - pub enum OptBacktraceFrom { - #[error("...")] - Test { - #[from] - source: Inner, - #[backtrace] - backtrace: Option, - }, - } - - #[derive(Error, Debug)] - pub enum ArcBacktraceFrom { - #[error("...")] - Test { - #[from] - source: Inner, - #[backtrace] - backtrace: Arc, - }, - } - - #[test] - fn test_backtrace() { - let error = PlainBacktrace::Test { - backtrace: Backtrace::capture(), - }; - assert!(any::request_ref::(&error).is_some()); - - let error = ExplicitBacktrace::Test { - backtrace: Backtrace::capture(), - }; - assert!(any::request_ref::(&error).is_some()); - - let error = OptBacktrace::Test { - backtrace: Some(Backtrace::capture()), - }; - assert!(any::request_ref::(&error).is_some()); - - let error = ArcBacktrace::Test { - backtrace: Arc::new(Backtrace::capture()), - }; - 
assert!(any::request_ref::(&error).is_some()); - - let error = BacktraceFrom::from(Inner); - assert!(any::request_ref::(&error).is_some()); - - let error = CombinedBacktraceFrom::from(InnerBacktrace { - backtrace: Backtrace::capture(), - }); - assert!(any::request_ref::(&error).is_some()); - - let error = OptBacktraceFrom::from(Inner); - assert!(any::request_ref::(&error).is_some()); - - let error = ArcBacktraceFrom::from(Inner); - assert!(any::request_ref::(&error).is_some()); - } -} - -#[test] -#[cfg_attr(not(thiserror_nightly_testing), ignore)] -fn test_backtrace() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-backtrace-backtrace.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-backtrace-backtrace.rs deleted file mode 100644 index 8f411bf5d890..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-backtrace-backtrace.rs +++ /dev/null @@ -1,10 +0,0 @@ -// https://github.com/dtolnay/thiserror/issues/163 - -use std::backtrace::Backtrace; -use thiserror::Error; - -#[derive(Error, Debug)] -#[error("...")] -pub struct Error(#[from] #[backtrace] std::io::Error, Backtrace); - -fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-backtrace-backtrace.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-backtrace-backtrace.stderr deleted file mode 100644 index 55d647b42565..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-backtrace-backtrace.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: deriving From requires no fields other than source and backtrace - --> tests/ui/from-backtrace-backtrace.rs:8:18 - | -8 | pub struct Error(#[from] #[backtrace] std::io::Error, Backtrace); - | ^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/no-display.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/no-display.stderr deleted file mode 100644 index 76818e1e5e0f..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/no-display.stderr +++ /dev/null @@ -1,17 +0,0 @@ -error[E0599]: the method `as_display` exists for reference `&NoDisplay`, but its trait bounds were not satisfied - --> tests/ui/no-display.rs:7:9 - | -4 | struct NoDisplay; - | ---------------- doesn't satisfy `NoDisplay: std::fmt::Display` -... 
-7 | #[error("thread: {thread}")] - | ^^^^^^^^^^^^^^^^^^ method cannot be called on `&NoDisplay` due to unsatisfied trait bounds - | - = note: the following trait bounds were not satisfied: - `NoDisplay: std::fmt::Display` - which is required by `&NoDisplay: DisplayAsDisplay` -note: the trait `std::fmt::Display` must be implemented - --> $RUST/core/src/fmt/mod.rs - | - | pub trait Display { - | ^^^^^^^^^^^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-enum-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-enum-not-error.stderr deleted file mode 100644 index 750c69eb3e9c..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-enum-not-error.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error[E0599]: the method `as_dyn_error` exists for reference `&NotError`, but its trait bounds were not satisfied - --> tests/ui/source-enum-not-error.rs:10:9 - | -4 | pub struct NotError; - | ------------------- - | | - | doesn't satisfy `NotError: AsDynError<'_>` - | doesn't satisfy `NotError: std::error::Error` -... -10 | source: NotError, - | ^^^^^^ method cannot be called on `&NotError` due to unsatisfied trait bounds - | - = note: the following trait bounds were not satisfied: - `NotError: std::error::Error` - which is required by `NotError: AsDynError<'_>` - `&NotError: std::error::Error` - which is required by `&NotError: AsDynError<'_>` -note: the trait `std::error::Error` must be implemented - --> $RUST/core/src/error.rs - | - | pub trait Error: Debug + Display { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-struct-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-struct-not-error.stderr deleted file mode 100644 index b98460fcbe2a..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-struct-not-error.stderr +++ /dev/null @@ -1,21 +0,0 @@ -error[E0599]: the method `as_dyn_error` exists for struct `NotError`, but its trait bounds were not satisfied - --> tests/ui/source-struct-not-error.rs:9:5 - | -4 | struct NotError; - | --------------- - | | - | method `as_dyn_error` not found for this struct - | doesn't satisfy `NotError: AsDynError<'_>` - | doesn't satisfy `NotError: std::error::Error` -... 
-9 | source: NotError, - | ^^^^^^ method cannot be called on `NotError` due to unsatisfied trait bounds - | - = note: the following trait bounds were not satisfied: - `NotError: std::error::Error` - which is required by `NotError: AsDynError<'_>` -note: the trait `std::error::Error` must be implemented - --> $RUST/core/src/error.rs - | - | pub trait Error: Debug + Display { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/zeroize-1.5.5/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.cargo-checksum.json similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/zeroize-1.5.5/.cargo-checksum.json rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.cargo-checksum.json diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.cargo_vcs_info.json new file mode 100644 index 000000000000..fb61c21a2445 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "5088592a4efb6a5c40b4d869eb1a0e2eacf622cb" + }, + "path_in_vcs": "" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.github/FUNDING.yml b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.github/FUNDING.yml new file mode 100644 index 000000000000..750707701cda --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.github/FUNDING.yml @@ -0,0 +1 @@ +github: dtolnay diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.github/workflows/ci.yml b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.github/workflows/ci.yml new file mode 100644 index 000000000000..65a20f51973a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.github/workflows/ci.yml @@ -0,0 +1,111 @@ +name: CI + +on: + push: + pull_request: + workflow_dispatch: + schedule: [cron: "40 1 * * *"] + +permissions: + contents: read + +env: + RUSTFLAGS: -Dwarnings + +jobs: + pre_ci: + uses: dtolnay/.github/.github/workflows/pre_ci.yml@master + + test: + name: Rust ${{matrix.rust}} + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: [nightly, beta, stable, 1.56.0] + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{matrix.rust}} + components: rust-src + - name: Enable type layout randomization + run: echo RUSTFLAGS=${RUSTFLAGS}\ -Zrandomize-layout >> $GITHUB_ENV + if: matrix.rust == 'nightly' + - name: Enable nightly-only tests + run: echo RUSTFLAGS=${RUSTFLAGS}\ --cfg=thiserror_nightly_testing >> $GITHUB_ENV + if: matrix.rust == 'nightly' + - run: cargo test --all + - uses: actions/upload-artifact@v4 + if: matrix.rust == 'nightly' && always() + with: + name: Cargo.lock + path: Cargo.lock + + minimal: + name: Minimal versions + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + - run: cargo generate-lockfile -Z minimal-versions + - run: cargo check --locked + + doc: + name: Documentation + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + env: + RUSTDOCFLAGS: -Dwarnings + steps: + - uses: actions/checkout@v4 + - uses: 
dtolnay/rust-toolchain@nightly + with: + components: rust-src + - uses: dtolnay/install@cargo-docs-rs + - run: cargo docs-rs + + clippy: + name: Clippy + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@nightly + with: + components: clippy, rust-src + - run: cargo clippy --tests --workspace -- -Dclippy::all -Dclippy::pedantic + + miri: + name: Miri + needs: pre_ci + if: needs.pre_ci.outputs.continue + runs-on: ubuntu-latest + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@miri + - run: cargo miri setup + - run: cargo miri test + env: + MIRIFLAGS: -Zmiri-strict-provenance + + outdated: + name: Outdated + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + timeout-minutes: 45 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/install@cargo-outdated + - run: cargo outdated --workspace --exit-code 1 diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.gitignore b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.gitignore new file mode 100644 index 000000000000..693699042b1a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/.gitignore @@ -0,0 +1,3 @@ +/target +**/*.rs.bk +Cargo.lock diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/Cargo.toml new file mode 100644 index 000000000000..1e1d0cdde929 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/Cargo.toml @@ -0,0 +1,110 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.56" +name = "thiserror" +version = "1.0.65" +authors = ["David Tolnay "] +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "derive(Error)" +documentation = "https://docs.rs/thiserror" +readme = "README.md" +keywords = [ + "error", + "error-handling", + "derive", +] +categories = ["rust-patterns"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/thiserror" + +[package.metadata.docs.rs] +rustdoc-args = ["--generate-link-to-definition"] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] +name = "thiserror" +path = "src/lib.rs" + +[[test]] +name = "compiletest" +path = "tests/compiletest.rs" + +[[test]] +name = "test_backtrace" +path = "tests/test_backtrace.rs" + +[[test]] +name = "test_deprecated" +path = "tests/test_deprecated.rs" + +[[test]] +name = "test_display" +path = "tests/test_display.rs" + +[[test]] +name = "test_error" +path = "tests/test_error.rs" + +[[test]] +name = "test_expr" +path = "tests/test_expr.rs" + +[[test]] +name = "test_from" +path = "tests/test_from.rs" + +[[test]] +name = "test_generics" +path = "tests/test_generics.rs" + +[[test]] +name = "test_lints" +path = "tests/test_lints.rs" + +[[test]] +name = "test_option" +path = "tests/test_option.rs" + +[[test]] +name = "test_path" +path = "tests/test_path.rs" + +[[test]] +name = "test_source" +path = "tests/test_source.rs" + +[[test]] +name = "test_transparent" +path = "tests/test_transparent.rs" + +[dependencies.thiserror-impl] +version = "=1.0.65" + +[dev-dependencies.anyhow] +version = "1.0.73" + +[dev-dependencies.ref-cast] +version = "1.0.18" + +[dev-dependencies.rustversion] +version = "1.0.13" + +[dev-dependencies.trybuild] +version = "1.0.81" +features = ["diff"] diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/Cargo.toml.orig new file mode 100644 index 000000000000..4ed217390c43 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/Cargo.toml.orig @@ -0,0 +1,28 @@ +[package] +name = "thiserror" +version = "1.0.65" +authors = ["David Tolnay "] +categories = ["rust-patterns"] +description = "derive(Error)" +documentation = "https://docs.rs/thiserror" +edition = "2021" +keywords = ["error", "error-handling", "derive"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/thiserror" +rust-version = "1.56" + +[dependencies] +thiserror-impl = { version = "=1.0.65", path = "impl" } + +[dev-dependencies] +anyhow = "1.0.73" +ref-cast = "1.0.18" +rustversion = "1.0.13" +trybuild = { version = "1.0.81", features = ["diff"] } + +[workspace] +members = ["impl"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = ["--generate-link-to-definition"] diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/LICENSE-APACHE new file mode 100644 index 000000000000..1b5ec8b78e23 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/LICENSE-MIT similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/LICENSE-MIT rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/LICENSE-MIT diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/README.md b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/README.md new file mode 100644 index 000000000000..3b7d74375ccd --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/README.md @@ -0,0 +1,238 @@ +derive(Error) +============= + +[github](https://github.com/dtolnay/thiserror) +[crates.io](https://crates.io/crates/thiserror) +[docs.rs](https://docs.rs/thiserror) +[build status](https://github.com/dtolnay/thiserror/actions?query=branch%3Amaster) + +This library provides a convenient derive macro for the standard library's +[`std::error::Error`] trait. + +[`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html + +```toml +[dependencies] +thiserror = "1.0" +``` + +*Compiler support: requires rustc 1.56+* + +
+ +## Example + +```rust +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum DataStoreError { + #[error("data store disconnected")] + Disconnect(#[from] io::Error), + #[error("the data for key `{0}` is not available")] + Redaction(String), + #[error("invalid header (expected {expected:?}, found {found:?})")] + InvalidHeader { + expected: String, + found: String, + }, + #[error("unknown data store error")] + Unknown, +} +``` + +
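As a minimal sketch of what the derived `Display` impl produces for this example, assuming a trimmed copy of `DataStoreError` (the `io::Error` variant is omitted so the snippet stays self-contained, and the key and header strings are made up):

```rust
use thiserror::Error;

// Trimmed copy of the README's example enum.
#[derive(Error, Debug)]
pub enum DataStoreError {
    #[error("the data for key `{0}` is not available")]
    Redaction(String),
    #[error("invalid header (expected {expected:?}, found {found:?})")]
    InvalidHeader { expected: String, found: String },
    #[error("unknown data store error")]
    Unknown,
}

fn main() {
    // The #[error("...")] templates become the Display implementation.
    let err = DataStoreError::Redaction("user/42".to_owned());
    assert_eq!(err.to_string(), "the data for key `user/42` is not available");

    let err = DataStoreError::InvalidHeader {
        expected: "v2".to_owned(),
        found: "v1".to_owned(),
    };
    assert_eq!(
        err.to_string(),
        r#"invalid header (expected "v2", found "v1")"#
    );
}
```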
+
+## Details
+
+- Thiserror deliberately does not appear in your public API. You get the same
+  thing as if you had written an implementation of `std::error::Error` by hand,
+  and switching from handwritten impls to thiserror or vice versa is not a
+  breaking change.
+
+- Errors may be enums, structs with named fields, tuple structs, or unit
+  structs.
+
+- A `Display` impl is generated for your error if you provide `#[error("...")]`
+  messages on the struct or each variant of your enum, as shown above in the
+  example.
+
+  The messages support a shorthand for interpolating fields from the error.
+
+  - `#[error("{var}")]` ⟶ `write!("{}", self.var)`
+  - `#[error("{0}")]` ⟶ `write!("{}", self.0)`
+  - `#[error("{var:?}")]` ⟶ `write!("{:?}", self.var)`
+  - `#[error("{0:?}")]` ⟶ `write!("{:?}", self.0)`
+
+  These shorthands can be used together with any additional format args, which
+  may be arbitrary expressions. For example:
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum Error {
+      #[error("invalid rdo_lookahead_frames {0} (expected < {})", i32::MAX)]
+      InvalidLookahead(u32),
+  }
+  ```
+
+  If one of the additional expression arguments needs to refer to a field of the
+  struct or enum, then refer to named fields as `.var` and tuple fields as `.0`.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum Error {
+      #[error("first letter must be lowercase but was {:?}", first_char(.0))]
+      WrongCase(String),
+      #[error("invalid index {idx}, expected at least {} and at most {}", .limits.lo, .limits.hi)]
+      OutOfBounds { idx: usize, limits: Limits },
+  }
+  ```
+
+- A `From` impl is generated for each variant that contains a `#[from]`
+  attribute.
+
+  The variant using `#[from]` must not contain any other fields beyond the
+  source error (and possibly a backtrace — see below). Usually `#[from]`
+  fields are unnamed, but `#[from]` is allowed on a named field too.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum MyError {
+      Io(#[from] io::Error),
+      Glob(#[from] globset::Error),
+  }
+  ```
+
+- The Error trait's `source()` method is implemented to return whichever field
+  has a `#[source]` attribute or is named `source`, if any. This is for
+  identifying the underlying lower level error that caused your error.
+
+  The `#[from]` attribute always implies that the same field is `#[source]`, so
+  you don't ever need to specify both attributes.
+
+  Any error type that implements `std::error::Error` or dereferences to `dyn
+  std::error::Error` will work as a source.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub struct MyError {
+      msg: String,
+      #[source]  // optional if field name is `source`
+      source: anyhow::Error,
+  }
+  ```
+
+- The Error trait's `provide()` method is implemented to provide whichever field
+  has a type named `Backtrace`, if any, as a `std::backtrace::Backtrace`. Using
+  `Backtrace` in errors requires a nightly compiler with Rust version 1.73 or
+  newer.
+
+  ```rust
+  use std::backtrace::Backtrace;
+
+  #[derive(Error, Debug)]
+  pub struct MyError {
+      msg: String,
+      backtrace: Backtrace,  // automatically detected
+  }
+  ```
+
+- If a field is both a source (named `source`, or has `#[source]` or `#[from]`
+  attribute) *and* is marked `#[backtrace]`, then the Error trait's `provide()`
+  method is forwarded to the source's `provide` so that both layers of the error
+  share the same backtrace. The `#[backtrace]` attribute requires a nightly
+  compiler with Rust version 1.73 or newer.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum MyError {
+      Io {
+          #[backtrace]
+          source: io::Error,
+      },
+  }
+  ```
+
+- For variants that use `#[from]` and also contain a `Backtrace` field, a
+  backtrace is captured from within the `From` impl.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum MyError {
+      Io {
+          #[from]
+          source: io::Error,
+          backtrace: Backtrace,
+      },
+  }
+  ```
+
+- Errors may use `error(transparent)` to forward the source and Display methods
+  straight through to an underlying error without adding an additional message.
+  This would be appropriate for enums that need an "anything else" variant.
+
+  ```rust
+  #[derive(Error, Debug)]
+  pub enum MyError {
+      ...
+
+      #[error(transparent)]
+      Other(#[from] anyhow::Error),  // source and Display delegate to anyhow::Error
+  }
+  ```
+
+  Another use case is hiding implementation details of an error representation
+  behind an opaque error type, so that the representation is able to evolve
+  without breaking the crate's public API.
+
+  ```rust
+  // PublicError is public, but opaque and easy to keep compatible.
+  #[derive(Error, Debug)]
+  #[error(transparent)]
+  pub struct PublicError(#[from] ErrorRepr);
+
+  impl PublicError {
+      // Accessors for anything we do want to expose publicly.
+  }
+
+  // Private and free to change across minor version of the crate.
+  #[derive(Error, Debug)]
+  enum ErrorRepr {
+      ...
+  }
+  ```
+
+- See also the [`anyhow`] library for a convenient single error type to use in
+  application code.
+
+  [`anyhow`]: https://github.com/dtolnay/anyhow
+
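Since the Details above lean heavily on `#[source]` and `#[from]`, here is a short sketch, not taken from the vendored crate and with a made-up `ConfigError` type, of how the generated `source()` chain is consumed at the call site.

```rust
use std::error::Error as _; // bring the trait into scope for .source()
use std::io;
use thiserror::Error;

// #[from] on the only field generates From<io::Error> and marks it as the source.
#[derive(Error, Debug)]
#[error("failed to load config")]
struct ConfigError {
    #[from]
    source: io::Error,
}

fn main() {
    let err = ConfigError::from(io::Error::new(io::ErrorKind::NotFound, "missing file"));

    // Display comes from #[error("...")].
    println!("{}", err); // failed to load config

    // Walk the cause chain that the derive wired up.
    let mut cause: Option<&(dyn std::error::Error + 'static)> = err.source();
    while let Some(inner) = cause {
        println!("caused by: {}", inner); // caused by: missing file
        cause = inner.source();
    }
}
```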
+
+## Comparison to anyhow
+
+Use thiserror if you care about designing your own dedicated error type(s) so
+that the caller receives exactly the information that you choose in the event of
+failure. This most often applies to library-like code. Use [Anyhow] if you don't
+care what error type your functions return, you just want it to be easy. This is
+common in application-like code.
+
+[Anyhow]: https://github.com/dtolnay/anyhow
+
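A rough editorial illustration of that split, assuming both thiserror and anyhow are available as dependencies; `FetchError` and `fetch` are invented for the sketch.

```rust
use std::io;
use thiserror::Error;

// Library-style code: a dedicated, matchable error type derived with thiserror.
#[derive(Error, Debug)]
pub enum FetchError {
    #[error("network failure")]
    Network(#[from] io::Error),
    #[error("record `{0}` not found")]
    NotFound(String),
}

pub fn fetch(key: &str) -> Result<String, FetchError> {
    Err(FetchError::NotFound(key.to_string()))
}

// Application-style code: anyhow erases the concrete error type and adds context.
fn main() {
    use anyhow::Context;
    let result: anyhow::Result<String> = fetch("user:42").context("loading user record");
    if let Err(err) = result {
        // Alternate Display ({:#}) prints the context followed by the underlying cause.
        eprintln!("{:#}", err);
    }
}
```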
+
+#### License
+
+Licensed under either of Apache License, Version
+2.0 or MIT license at your option.
+
+ + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this crate by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. + diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/build.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/build.rs new file mode 100644 index 000000000000..51ac436e1fa9 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/build.rs @@ -0,0 +1,146 @@ +use std::env; +use std::ffi::OsString; +use std::fs; +use std::io::ErrorKind; +use std::iter; +use std::path::Path; +use std::process::{self, Command, Stdio}; + +fn main() { + println!("cargo:rerun-if-changed=build/probe.rs"); + + println!("cargo:rustc-check-cfg=cfg(error_generic_member_access)"); + println!("cargo:rustc-check-cfg=cfg(thiserror_nightly_testing)"); + + let error_generic_member_access; + let consider_rustc_bootstrap; + if compile_probe(false) { + // This is a nightly or dev compiler, so it supports unstable features + // regardless of RUSTC_BOOTSTRAP. No need to rerun build script if + // RUSTC_BOOTSTRAP is changed. + error_generic_member_access = true; + consider_rustc_bootstrap = false; + } else if let Some(rustc_bootstrap) = env::var_os("RUSTC_BOOTSTRAP") { + if compile_probe(true) { + // This is a stable or beta compiler for which the user has set + // RUSTC_BOOTSTRAP to turn on unstable features. Rerun build script + // if they change it. + error_generic_member_access = true; + consider_rustc_bootstrap = true; + } else if rustc_bootstrap == "1" { + // This compiler does not support the generic member access API in + // the form that thiserror expects. No need to pay attention to + // RUSTC_BOOTSTRAP. + error_generic_member_access = false; + consider_rustc_bootstrap = false; + } else { + // This is a stable or beta compiler for which RUSTC_BOOTSTRAP is + // set to restrict the use of unstable features by this crate. + error_generic_member_access = false; + consider_rustc_bootstrap = true; + } + } else { + // Without RUSTC_BOOTSTRAP, this compiler does not support the generic + // member access API in the form that thiserror expects, but try again + // if the user turns on unstable features. + error_generic_member_access = false; + consider_rustc_bootstrap = true; + } + + if error_generic_member_access { + println!("cargo:rustc-cfg=error_generic_member_access"); + } + + if consider_rustc_bootstrap { + println!("cargo:rerun-if-env-changed=RUSTC_BOOTSTRAP"); + } +} + +fn compile_probe(rustc_bootstrap: bool) -> bool { + if env::var_os("RUSTC_STAGE").is_some() { + // We are running inside rustc bootstrap. This is a highly non-standard + // environment with issues such as: + // + // https://github.com/rust-lang/cargo/issues/11138 + // https://github.com/rust-lang/rust/issues/114839 + // + // Let's just not use nightly features here. 
+ return false; + } + + let rustc = cargo_env_var("RUSTC"); + let out_dir = cargo_env_var("OUT_DIR"); + let out_subdir = Path::new(&out_dir).join("probe"); + let probefile = Path::new("build").join("probe.rs"); + + if let Err(err) = fs::create_dir(&out_subdir) { + if err.kind() != ErrorKind::AlreadyExists { + eprintln!("Failed to create {}: {}", out_subdir.display(), err); + process::exit(1); + } + } + + let rustc_wrapper = env::var_os("RUSTC_WRAPPER").filter(|wrapper| !wrapper.is_empty()); + let rustc_workspace_wrapper = + env::var_os("RUSTC_WORKSPACE_WRAPPER").filter(|wrapper| !wrapper.is_empty()); + let mut rustc = rustc_wrapper + .into_iter() + .chain(rustc_workspace_wrapper) + .chain(iter::once(rustc)); + let mut cmd = Command::new(rustc.next().unwrap()); + cmd.args(rustc); + + if !rustc_bootstrap { + cmd.env_remove("RUSTC_BOOTSTRAP"); + } + + cmd.stderr(Stdio::null()) + .arg("--edition=2018") + .arg("--crate-name=thiserror") + .arg("--crate-type=lib") + .arg("--cap-lints=allow") + .arg("--emit=dep-info,metadata") + .arg("--out-dir") + .arg(&out_subdir) + .arg(probefile); + + if let Some(target) = env::var_os("TARGET") { + cmd.arg("--target").arg(target); + } + + // If Cargo wants to set RUSTFLAGS, use that. + if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") { + if !rustflags.is_empty() { + for arg in rustflags.split('\x1f') { + cmd.arg(arg); + } + } + } + + let success = match cmd.status() { + Ok(status) => status.success(), + Err(_) => false, + }; + + // Clean up to avoid leaving nondeterministic absolute paths in the dep-info + // file in OUT_DIR, which causes nonreproducible builds in build systems + // that treat the entire OUT_DIR as an artifact. + if let Err(err) = fs::remove_dir_all(&out_subdir) { + if err.kind() != ErrorKind::NotFound { + eprintln!("Failed to clean up {}: {}", out_subdir.display(), err); + process::exit(1); + } + } + + success +} + +fn cargo_env_var(key: &str) -> OsString { + env::var_os(key).unwrap_or_else(|| { + eprintln!( + "Environment variable ${} is not set during execution of build script", + key, + ); + process::exit(1); + }) +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/build/probe.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/build/probe.rs new file mode 100644 index 000000000000..faf25c5c8945 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/build/probe.rs @@ -0,0 +1,32 @@ +// This code exercises the surface area that we expect of the Error generic +// member access API. If the current toolchain is able to compile it, then +// thiserror is able to provide backtrace support. + +#![feature(error_generic_member_access)] + +use core::fmt::{self, Debug, Display}; +use std::error::{Error, Request}; + +struct MyError(Thing); +struct Thing; + +impl Debug for MyError { + fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result { + unimplemented!() + } +} + +impl Display for MyError { + fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result { + unimplemented!() + } +} + +impl Error for MyError { + fn provide<'a>(&'a self, request: &mut Request<'a>) { + request.provide_ref(&self.0); + } +} + +// Include in sccache cache key. 
+const _: Option<&str> = option_env!("RUSTC_BOOTSTRAP"); diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/rust-toolchain.toml b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/rust-toolchain.toml similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/rust-toolchain.toml rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/rust-toolchain.toml diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/aserror.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/aserror.rs similarity index 92% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/aserror.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/aserror.rs index 5fea84ef885d..1bced57a36ee 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/src/aserror.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/aserror.rs @@ -1,6 +1,7 @@ +use core::panic::UnwindSafe; use std::error::Error; -use std::panic::UnwindSafe; +#[doc(hidden)] pub trait AsDynError<'a>: Sealed { fn as_dyn_error(&self) -> &(dyn Error + 'a); } @@ -40,8 +41,9 @@ impl<'a> AsDynError<'a> for dyn Error + Send + Sync + UnwindSafe + 'a { } } +#[doc(hidden)] pub trait Sealed {} -impl<'a, T: Error + 'a> Sealed for T {} +impl Sealed for T {} impl<'a> Sealed for dyn Error + 'a {} impl<'a> Sealed for dyn Error + Send + 'a {} impl<'a> Sealed for dyn Error + Send + Sync + 'a {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/display.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/display.rs new file mode 100644 index 000000000000..3c43216a382d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/display.rs @@ -0,0 +1,40 @@ +use core::fmt::Display; +use std::path::{self, Path, PathBuf}; + +#[doc(hidden)] +pub trait AsDisplay<'a> { + // TODO: convert to generic associated type. + // https://github.com/dtolnay/thiserror/pull/253 + type Target: Display; + + fn as_display(&'a self) -> Self::Target; +} + +impl<'a, T> AsDisplay<'a> for &T +where + T: Display + 'a, +{ + type Target = &'a T; + + fn as_display(&'a self) -> Self::Target { + *self + } +} + +impl<'a> AsDisplay<'a> for Path { + type Target = path::Display<'a>; + + #[inline] + fn as_display(&'a self) -> Self::Target { + self.display() + } +} + +impl<'a> AsDisplay<'a> for PathBuf { + type Target = path::Display<'a>; + + #[inline] + fn as_display(&'a self) -> Self::Target { + self.display() + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/lib.rs new file mode 100644 index 000000000000..15872e3aaae6 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/lib.rs @@ -0,0 +1,290 @@ +//! [![github]](https://github.com/dtolnay/thiserror) [![crates-io]](https://crates.io/crates/thiserror) [![docs-rs]](https://docs.rs/thiserror) +//! +//! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github +//! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust +//! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs +//! +//!
+//! +//! This library provides a convenient derive macro for the standard library's +//! [`std::error::Error`] trait. +//! +//! [`std::error::Error`]: https://doc.rust-lang.org/std/error/trait.Error.html +//! +//!
+//! +//! # Example +//! +//! ```rust +//! # use std::io; +//! use thiserror::Error; +//! +//! #[derive(Error, Debug)] +//! pub enum DataStoreError { +//! #[error("data store disconnected")] +//! Disconnect(#[from] io::Error), +//! #[error("the data for key `{0}` is not available")] +//! Redaction(String), +//! #[error("invalid header (expected {expected:?}, found {found:?})")] +//! InvalidHeader { +//! expected: String, +//! found: String, +//! }, +//! #[error("unknown data store error")] +//! Unknown, +//! } +//! ``` +//! +//!
+//! +//! # Details +//! +//! - Thiserror deliberately does not appear in your public API. You get the +//! same thing as if you had written an implementation of `std::error::Error` +//! by hand, and switching from handwritten impls to thiserror or vice versa +//! is not a breaking change. +//! +//! - Errors may be enums, structs with named fields, tuple structs, or unit +//! structs. +//! +//! - A `Display` impl is generated for your error if you provide +//! `#[error("...")]` messages on the struct or each variant of your enum, as +//! shown above in the example. +//! +//! The messages support a shorthand for interpolating fields from the error. +//! +//! - `#[error("{var}")]` ⟶ `write!("{}", self.var)` +//! - `#[error("{0}")]` ⟶ `write!("{}", self.0)` +//! - `#[error("{var:?}")]` ⟶ `write!("{:?}", self.var)` +//! - `#[error("{0:?}")]` ⟶ `write!("{:?}", self.0)` +//! +//! These shorthands can be used together with any additional format args, +//! which may be arbitrary expressions. For example: +//! +//! ```rust +//! # use core::i32; +//! # use thiserror::Error; +//! # +//! #[derive(Error, Debug)] +//! pub enum Error { +//! #[error("invalid rdo_lookahead_frames {0} (expected < {})", i32::MAX)] +//! InvalidLookahead(u32), +//! } +//! ``` +//! +//! If one of the additional expression arguments needs to refer to a field of +//! the struct or enum, then refer to named fields as `.var` and tuple fields +//! as `.0`. +//! +//! ```rust +//! # use thiserror::Error; +//! # +//! # fn first_char(s: &String) -> char { +//! # s.chars().next().unwrap() +//! # } +//! # +//! # #[derive(Debug)] +//! # struct Limits { +//! # lo: usize, +//! # hi: usize, +//! # } +//! # +//! #[derive(Error, Debug)] +//! pub enum Error { +//! #[error("first letter must be lowercase but was {:?}", first_char(.0))] +//! WrongCase(String), +//! #[error("invalid index {idx}, expected at least {} and at most {}", .limits.lo, .limits.hi)] +//! OutOfBounds { idx: usize, limits: Limits }, +//! } +//! ``` +//! +//! - A `From` impl is generated for each variant that contains a `#[from]` +//! attribute. +//! +//! The variant using `#[from]` must not contain any other fields beyond the +//! source error (and possibly a backtrace — see below). Usually +//! `#[from]` fields are unnamed, but `#[from]` is allowed on a named field +//! too. +//! +//! ```rust +//! # use core::fmt::{self, Display}; +//! # use std::io; +//! # use thiserror::Error; +//! # +//! # mod globset { +//! # #[derive(thiserror::Error, Debug)] +//! # #[error("...")] +//! # pub struct Error; +//! # } +//! # +//! #[derive(Error, Debug)] +//! pub enum MyError { +//! Io(#[from] io::Error), +//! Glob(#[from] globset::Error), +//! } +//! # +//! # impl Display for MyError { +//! # fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { +//! # unimplemented!() +//! # } +//! # } +//! ``` +//! +//! - The Error trait's `source()` method is implemented to return whichever +//! field has a `#[source]` attribute or is named `source`, if any. This is +//! for identifying the underlying lower level error that caused your error. +//! +//! The `#[from]` attribute always implies that the same field is `#[source]`, +//! so you don't ever need to specify both attributes. +//! +//! Any error type that implements `std::error::Error` or dereferences to `dyn +//! std::error::Error` will work as a source. +//! +//! ```rust +//! # use core::fmt::{self, Display}; +//! # use thiserror::Error; +//! # +//! #[derive(Error, Debug)] +//! pub struct MyError { +//! msg: String, +//! 
#[source] // optional if field name is `source` +//! source: anyhow::Error, +//! } +//! # +//! # impl Display for MyError { +//! # fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { +//! # unimplemented!() +//! # } +//! # } +//! ``` +//! +//! - The Error trait's `provide()` method is implemented to provide whichever +//! field has a type named `Backtrace`, if any, as a +//! `std::backtrace::Backtrace`. Using `Backtrace` in errors requires a +//! nightly compiler with Rust version 1.73 or newer. +//! +//! ```rust +//! # const IGNORE: &str = stringify! { +//! use std::backtrace::Backtrace; +//! +//! #[derive(Error, Debug)] +//! pub struct MyError { +//! msg: String, +//! backtrace: Backtrace, // automatically detected +//! } +//! # }; +//! ``` +//! +//! - If a field is both a source (named `source`, or has `#[source]` or +//! `#[from]` attribute) *and* is marked `#[backtrace]`, then the Error +//! trait's `provide()` method is forwarded to the source's `provide` so that +//! both layers of the error share the same backtrace. The `#[backtrace]` +//! attribute requires a nightly compiler with Rust version 1.73 or newer. +//! +//! ```rust +//! # const IGNORE: &str = stringify! { +//! #[derive(Error, Debug)] +//! pub enum MyError { +//! Io { +//! #[backtrace] +//! source: io::Error, +//! }, +//! } +//! # }; +//! ``` +//! +//! - For variants that use `#[from]` and also contain a `Backtrace` field, a +//! backtrace is captured from within the `From` impl. +//! +//! ```rust +//! # const IGNORE: &str = stringify! { +//! #[derive(Error, Debug)] +//! pub enum MyError { +//! Io { +//! #[from] +//! source: io::Error, +//! backtrace: Backtrace, +//! }, +//! } +//! # }; +//! ``` +//! +//! - Errors may use `error(transparent)` to forward the source and Display +//! methods straight through to an underlying error without adding an +//! additional message. This would be appropriate for enums that need an +//! "anything else" variant. +//! +//! ``` +//! # use thiserror::Error; +//! # +//! #[derive(Error, Debug)] +//! pub enum MyError { +//! # /* +//! ... +//! # */ +//! +//! #[error(transparent)] +//! Other(#[from] anyhow::Error), // source and Display delegate to anyhow::Error +//! } +//! ``` +//! +//! Another use case is hiding implementation details of an error +//! representation behind an opaque error type, so that the representation is +//! able to evolve without breaking the crate's public API. +//! +//! ``` +//! # use thiserror::Error; +//! # +//! // PublicError is public, but opaque and easy to keep compatible. +//! #[derive(Error, Debug)] +//! #[error(transparent)] +//! pub struct PublicError(#[from] ErrorRepr); +//! +//! impl PublicError { +//! // Accessors for anything we do want to expose publicly. +//! } +//! +//! // Private and free to change across minor version of the crate. +//! #[derive(Error, Debug)] +//! enum ErrorRepr { +//! # /* +//! ... +//! # */ +//! } +//! ``` +//! +//! - See also the [`anyhow`] library for a convenient single error type to use +//! in application code. +//! +//! 
[`anyhow`]: https://github.com/dtolnay/anyhow + +#![doc(html_root_url = "https://docs.rs/thiserror/1.0.65")] +#![allow( + clippy::module_name_repetitions, + clippy::needless_lifetimes, + clippy::return_self_not_must_use, + clippy::wildcard_imports +)] +#![cfg_attr(error_generic_member_access, feature(error_generic_member_access))] + +#[cfg(all(thiserror_nightly_testing, not(error_generic_member_access)))] +compile_error!("Build script probe failed to compile."); + +mod aserror; +mod display; +#[cfg(error_generic_member_access)] +mod provide; + +pub use thiserror_impl::*; + +// Not public API. +#[doc(hidden)] +pub mod __private { + #[doc(hidden)] + pub use crate::aserror::AsDynError; + #[doc(hidden)] + pub use crate::display::AsDisplay; + #[cfg(error_generic_member_access)] + #[doc(hidden)] + pub use crate::provide::ThiserrorProvide; +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/provide.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/provide.rs new file mode 100644 index 000000000000..7b4e922389bc --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/src/provide.rs @@ -0,0 +1,20 @@ +use std::error::{Error, Request}; + +#[doc(hidden)] +pub trait ThiserrorProvide: Sealed { + fn thiserror_provide<'a>(&'a self, request: &mut Request<'a>); +} + +impl ThiserrorProvide for T +where + T: Error + ?Sized, +{ + #[inline] + fn thiserror_provide<'a>(&'a self, request: &mut Request<'a>) { + self.provide(request); + } +} + +#[doc(hidden)] +pub trait Sealed {} +impl Sealed for T {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/compiletest.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/compiletest.rs new file mode 100644 index 000000000000..23a6a065ec96 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/compiletest.rs @@ -0,0 +1,7 @@ +#[rustversion::attr(not(nightly), ignore = "requires nightly")] +#[cfg_attr(miri, ignore = "incompatible with miri")] +#[test] +fn ui() { + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/*.rs"); +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_backtrace.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_backtrace.rs new file mode 100644 index 000000000000..8f11da395377 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_backtrace.rs @@ -0,0 +1,277 @@ +#![cfg_attr(thiserror_nightly_testing, feature(error_generic_member_access))] + +use thiserror::Error; + +#[derive(Error, Debug)] +#[error("...")] +pub struct Inner; + +#[cfg(thiserror_nightly_testing)] +#[derive(Error, Debug)] +#[error("...")] +pub struct InnerBacktrace { + backtrace: std::backtrace::Backtrace, +} + +#[cfg(thiserror_nightly_testing)] +pub mod structs { + use super::{Inner, InnerBacktrace}; + use std::backtrace::Backtrace; + use std::error::{self, Error}; + use std::sync::Arc; + use thiserror::Error; + + #[derive(Error, Debug)] + #[error("...")] + pub struct PlainBacktrace { + backtrace: Backtrace, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct ExplicitBacktrace { + #[backtrace] + backtrace: Backtrace, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct OptBacktrace { + #[backtrace] + backtrace: Option, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct ArcBacktrace { + #[backtrace] + backtrace: Arc, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct BacktraceFrom { + #[from] + 
source: Inner, + #[backtrace] + backtrace: Backtrace, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct CombinedBacktraceFrom { + #[from] + #[backtrace] + source: InnerBacktrace, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct OptBacktraceFrom { + #[from] + source: Inner, + #[backtrace] + backtrace: Option, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct ArcBacktraceFrom { + #[from] + source: Inner, + #[backtrace] + backtrace: Arc, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct AnyhowBacktrace { + #[backtrace] + source: anyhow::Error, + } + + #[derive(Error, Debug)] + #[error("...")] + pub struct BoxDynErrorBacktrace { + #[backtrace] + source: Box, + } + + #[test] + fn test_backtrace() { + let error = PlainBacktrace { + backtrace: Backtrace::capture(), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = ExplicitBacktrace { + backtrace: Backtrace::capture(), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = OptBacktrace { + backtrace: Some(Backtrace::capture()), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = ArcBacktrace { + backtrace: Arc::new(Backtrace::capture()), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = BacktraceFrom::from(Inner); + assert!(error::request_ref::(&error).is_some()); + + let error = CombinedBacktraceFrom::from(InnerBacktrace { + backtrace: Backtrace::capture(), + }); + assert!(error::request_ref::(&error).is_some()); + + let error = OptBacktraceFrom::from(Inner); + assert!(error::request_ref::(&error).is_some()); + + let error = ArcBacktraceFrom::from(Inner); + assert!(error::request_ref::(&error).is_some()); + + let error = AnyhowBacktrace { + source: anyhow::Error::msg("..."), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = BoxDynErrorBacktrace { + source: Box::new(PlainBacktrace { + backtrace: Backtrace::capture(), + }), + }; + assert!(error::request_ref::(&error).is_some()); + } +} + +#[cfg(thiserror_nightly_testing)] +pub mod enums { + use super::{Inner, InnerBacktrace}; + use std::backtrace::Backtrace; + use std::error; + use std::sync::Arc; + use thiserror::Error; + + #[derive(Error, Debug)] + pub enum PlainBacktrace { + #[error("...")] + Test { backtrace: Backtrace }, + } + + #[derive(Error, Debug)] + pub enum ExplicitBacktrace { + #[error("...")] + Test { + #[backtrace] + backtrace: Backtrace, + }, + } + + #[derive(Error, Debug)] + pub enum OptBacktrace { + #[error("...")] + Test { + #[backtrace] + backtrace: Option, + }, + } + + #[derive(Error, Debug)] + pub enum ArcBacktrace { + #[error("...")] + Test { + #[backtrace] + backtrace: Arc, + }, + } + + #[derive(Error, Debug)] + pub enum BacktraceFrom { + #[error("...")] + Test { + #[from] + source: Inner, + #[backtrace] + backtrace: Backtrace, + }, + } + + #[derive(Error, Debug)] + pub enum CombinedBacktraceFrom { + #[error("...")] + Test { + #[from] + #[backtrace] + source: InnerBacktrace, + }, + } + + #[derive(Error, Debug)] + pub enum OptBacktraceFrom { + #[error("...")] + Test { + #[from] + source: Inner, + #[backtrace] + backtrace: Option, + }, + } + + #[derive(Error, Debug)] + pub enum ArcBacktraceFrom { + #[error("...")] + Test { + #[from] + source: Inner, + #[backtrace] + backtrace: Arc, + }, + } + + #[test] + fn test_backtrace() { + let error = PlainBacktrace::Test { + backtrace: Backtrace::capture(), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = ExplicitBacktrace::Test { + backtrace: 
Backtrace::capture(), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = OptBacktrace::Test { + backtrace: Some(Backtrace::capture()), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = ArcBacktrace::Test { + backtrace: Arc::new(Backtrace::capture()), + }; + assert!(error::request_ref::(&error).is_some()); + + let error = BacktraceFrom::from(Inner); + assert!(error::request_ref::(&error).is_some()); + + let error = CombinedBacktraceFrom::from(InnerBacktrace { + backtrace: Backtrace::capture(), + }); + assert!(error::request_ref::(&error).is_some()); + + let error = OptBacktraceFrom::from(Inner); + assert!(error::request_ref::(&error).is_some()); + + let error = ArcBacktraceFrom::from(Inner); + assert!(error::request_ref::(&error).is_some()); + } +} + +#[test] +#[cfg_attr( + not(thiserror_nightly_testing), + ignore = "requires `--cfg=thiserror_nightly_testing`" +)] +fn test_backtrace() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_deprecated.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_deprecated.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_deprecated.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_deprecated.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_display.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_display.rs similarity index 76% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_display.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_display.rs index 99ce2fdede7a..89bdc4af8b74 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_display.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_display.rs @@ -1,4 +1,10 @@ -use std::fmt::{self, Display}; +#![allow( + clippy::needless_lifetimes, + clippy::needless_raw_string_hashes, + clippy::uninlined_format_args +)] + +use core::fmt::{self, Display}; use thiserror::Error; fn assert(expected: &str, value: T) { @@ -232,6 +238,18 @@ fn test_field() { assert("0", Error(Inner { data: 0 })); } +#[test] +fn test_nested_tuple_field() { + #[derive(Debug)] + struct Inner(usize); + + #[derive(Error, Debug)] + #[error("{}", .0.0)] + struct Error(Inner); + + assert("0", Error(Inner(0))); +} + #[test] fn test_macro_rules() { // Regression test for https://github.com/dtolnay/thiserror/issues/86 @@ -299,3 +317,58 @@ fn test_keyword() { assert("error: 1", Error); } + +#[test] +fn test_str_special_chars() { + #[derive(Error, Debug)] + pub enum Error { + #[error("brace left {{")] + BraceLeft, + #[error("brace left 2 \x7B\x7B")] + BraceLeft2, + #[error("brace left 3 \u{7B}\u{7B}")] + BraceLeft3, + #[error("brace right }}")] + BraceRight, + #[error("brace right 2 \x7D\x7D")] + BraceRight2, + #[error("brace right 3 \u{7D}\u{7D}")] + BraceRight3, + #[error( + "new_\ +line" + )] + NewLine, + #[error("escape24 \u{78}")] + Escape24, + } + + assert("brace left {", Error::BraceLeft); + assert("brace left 2 {", Error::BraceLeft2); + assert("brace left 3 {", Error::BraceLeft3); + assert("brace right }", Error::BraceRight); + assert("brace right 2 }", Error::BraceRight2); + assert("brace right 3 }", Error::BraceRight3); + assert("new_line", Error::NewLine); + assert("escape24 x", Error::Escape24); +} + +#[test] +fn test_raw_str() { + #[derive(Error, Debug)] + pub 
enum Error { + #[error(r#"raw brace left {{"#)] + BraceLeft, + #[error(r#"raw brace left 2 \x7B"#)] + BraceLeft2, + #[error(r#"raw brace right }}"#)] + BraceRight, + #[error(r#"raw brace right 2 \x7D"#)] + BraceRight2, + } + + assert(r#"raw brace left {"#, Error::BraceLeft); + assert(r#"raw brace left 2 \x7B"#, Error::BraceLeft2); + assert(r#"raw brace right }"#, Error::BraceRight); + assert(r#"raw brace right 2 \x7D"#, Error::BraceRight2); +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_error.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_error.rs similarity index 96% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_error.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_error.rs index fab934d7893f..eb52cefbd9e9 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_error.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_error.rs @@ -1,6 +1,6 @@ #![allow(dead_code)] -use std::fmt::{self, Display}; +use core::fmt::{self, Display}; use std::io; use thiserror::Error; diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_expr.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_expr.rs similarity index 96% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_expr.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_expr.rs index 34de56087bb3..c5e3b4b1b10e 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_expr.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_expr.rs @@ -1,6 +1,6 @@ -#![allow(clippy::iter_cloned_collect, clippy::option_if_let_else)] +#![allow(clippy::iter_cloned_collect, clippy::uninlined_format_args)] -use std::fmt::Display; +use core::fmt::Display; use thiserror::Error; // Some of the elaborate cases from the rcc codebase, which is a C compiler in diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_from.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_from.rs similarity index 95% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_from.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_from.rs index 1f387055f523..51af40b1579a 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_from.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_from.rs @@ -1,3 +1,5 @@ +#![allow(clippy::extra_unused_type_parameters)] + use std::io; use thiserror::Error; diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_generics.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_generics.rs similarity index 95% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_generics.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_generics.rs index 4ab9f3778655..d7790e2dfe30 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_generics.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_generics.rs @@ -1,6 +1,6 @@ -#![allow(clippy::needless_late_init)] +#![allow(clippy::needless_late_init, clippy::uninlined_format_args)] -use std::fmt::{self, Debug, Display}; +use core::fmt::{self, Debug, 
Display}; use thiserror::Error; pub struct NoFormat; @@ -158,4 +158,4 @@ pub struct StructFromGeneric { // #[derive(Error, Debug)] #[error(transparent)] -pub struct StructTransparentGeneric(E); +pub struct StructTransparentGeneric(pub E); diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_lints.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_lints.rs similarity index 90% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_lints.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_lints.rs index 59699a4a3f40..cafcbc0f3607 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_lints.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_lints.rs @@ -1,3 +1,5 @@ +#![allow(clippy::mixed_attributes_style)] + use thiserror::Error; pub use std::error::Error; diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_option.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_option.rs similarity index 79% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_option.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_option.rs index ed5287dc9272..fbdbec089cf8 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_option.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_option.rs @@ -1,7 +1,4 @@ -#![cfg_attr( - thiserror_nightly_testing, - feature(error_generic_member_access, provide_any) -)] +#![cfg_attr(thiserror_nightly_testing, feature(error_generic_member_access))] #[cfg(thiserror_nightly_testing)] pub mod structs { @@ -12,39 +9,39 @@ pub mod structs { #[error("...")] pub struct OptSourceNoBacktrace { #[source] - source: Option, + pub source: Option, } #[derive(Error, Debug)] #[error("...")] pub struct OptSourceAlwaysBacktrace { #[source] - source: Option, - backtrace: Backtrace, + pub source: Option, + pub backtrace: Backtrace, } #[derive(Error, Debug)] #[error("...")] pub struct NoSourceOptBacktrace { #[backtrace] - backtrace: Option, + pub backtrace: Option, } #[derive(Error, Debug)] #[error("...")] pub struct AlwaysSourceOptBacktrace { - source: anyhow::Error, + pub source: anyhow::Error, #[backtrace] - backtrace: Option, + pub backtrace: Option, } #[derive(Error, Debug)] #[error("...")] pub struct OptSourceOptBacktrace { #[source] - source: Option, + pub source: Option, #[backtrace] - backtrace: Option, + pub backtrace: Option, } } @@ -104,5 +101,8 @@ pub mod enums { } #[test] -#[cfg_attr(not(thiserror_nightly_testing), ignore)] +#[cfg_attr( + not(thiserror_nightly_testing), + ignore = "requires `--cfg=thiserror_nightly_testing`" +)] fn test_option() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_path.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_path.rs similarity index 97% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_path.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_path.rs index a34a3d74e999..f054077945fa 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_path.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_path.rs @@ -1,5 +1,5 @@ +use core::fmt::Display; use ref_cast::RefCast; -use std::fmt::Display; use std::path::{Path, PathBuf}; use 
thiserror::Error; diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_source.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_source.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_source.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_source.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_transparent.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_transparent.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/test_transparent.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/test_transparent.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/bad-field-attr.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/bad-field-attr.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/bad-field-attr.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/bad-field-attr.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/bad-field-attr.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/bad-field-attr.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/bad-field-attr.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/bad-field-attr.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/concat-display.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/concat-display.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/concat-display.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/concat-display.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/concat-display.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/concat-display.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/concat-display.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/concat-display.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-enum-source.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-enum-source.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-enum-source.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-enum-source.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-enum-source.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-enum-source.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-enum-source.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-enum-source.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-fmt.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-fmt.rs similarity index 
100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-fmt.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-fmt.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-fmt.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-fmt.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-fmt.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-fmt.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-struct-source.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-struct-source.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-struct-source.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-struct-source.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-struct-source.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-struct-source.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-struct-source.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-struct-source.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-transparent.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-transparent.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-transparent.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-transparent.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-transparent.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-transparent.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/duplicate-transparent.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/duplicate-transparent.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/fallback-impl-with-display.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/fallback-impl-with-display.rs new file mode 100644 index 000000000000..23dcf2877025 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/fallback-impl-with-display.rs @@ -0,0 +1,14 @@ +use core::fmt::{self, Display}; +use thiserror::Error; + +#[derive(Error, Debug)] +#[error] +pub struct MyError; + +impl Display for MyError { + fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result { + unimplemented!() + } +} + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/fallback-impl-with-display.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/fallback-impl-with-display.stderr new file mode 100644 index 000000000000..6bd3730731c8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/fallback-impl-with-display.stderr @@ -0,0 +1,16 @@ +error: expected attribute arguments in parentheses: #[error(...)] + --> 
tests/ui/fallback-impl-with-display.rs:5:3 + | +5 | #[error] + | ^^^^^ + +error[E0119]: conflicting implementations of trait `std::fmt::Display` for type `MyError` + --> tests/ui/fallback-impl-with-display.rs:4:10 + | +4 | #[derive(Error, Debug)] + | ^^^^^ conflicting implementation for `MyError` +... +8 | impl Display for MyError { + | ------------------------ first implementation here + | + = note: this error originates in the derive macro `Error` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-backtrace-backtrace.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-backtrace-backtrace.rs new file mode 100644 index 000000000000..3b781ac4e1f4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-backtrace-backtrace.rs @@ -0,0 +1,15 @@ +// https://github.com/dtolnay/thiserror/issues/163 + +use std::backtrace::Backtrace; +use thiserror::Error; + +#[derive(Error, Debug)] +#[error("...")] +pub struct Error( + #[from] + #[backtrace] + std::io::Error, + Backtrace, +); + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-backtrace-backtrace.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-backtrace-backtrace.stderr new file mode 100644 index 000000000000..5c0b9a3bfa24 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-backtrace-backtrace.stderr @@ -0,0 +1,5 @@ +error: deriving From requires no fields other than source and backtrace + --> tests/ui/from-backtrace-backtrace.rs:9:5 + | +9 | #[from] + | ^^^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-not-source.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-not-source.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-not-source.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-not-source.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-not-source.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-not-source.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/from-not-source.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/from-not-source.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/invalid-input-impl-anyway.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/invalid-input-impl-anyway.rs new file mode 100644 index 000000000000..0a0bcbee8ed5 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/invalid-input-impl-anyway.rs @@ -0,0 +1,11 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +#[error] +pub struct MyError; + +fn main() { + // No error on the following line. Thiserror emits an Error impl despite the + // bad attribute. 
+ _ = &MyError as &dyn std::error::Error; +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/invalid-input-impl-anyway.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/invalid-input-impl-anyway.stderr new file mode 100644 index 000000000000..b98c31e9c6b0 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/invalid-input-impl-anyway.stderr @@ -0,0 +1,5 @@ +error: expected attribute arguments in parentheses: #[error(...)] + --> tests/ui/invalid-input-impl-anyway.rs:4:3 + | +4 | #[error] + | ^^^^^ diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/lifetime.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/lifetime.rs similarity index 95% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/lifetime.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/lifetime.rs index 698f8c4e8a13..a82909d627e1 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/lifetime.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/lifetime.rs @@ -1,4 +1,4 @@ -use std::fmt::Debug; +use core::fmt::Debug; use thiserror::Error; #[derive(Error, Debug)] diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/lifetime.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/lifetime.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/lifetime.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/lifetime.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-display.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-display.rs new file mode 100644 index 000000000000..31e23fe683d7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-display.rs @@ -0,0 +1,9 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum MyError { + First, + Second, +} + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-display.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-display.stderr new file mode 100644 index 000000000000..48c9ded9fa17 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-display.stderr @@ -0,0 +1,13 @@ +error[E0277]: `MyError` doesn't implement `std::fmt::Display` + --> tests/ui/missing-display.rs:4:10 + | +4 | pub enum MyError { + | ^^^^^^^ `MyError` cannot be formatted with the default formatter + | + = help: the trait `std::fmt::Display` is not implemented for `MyError` + = note: in format strings you may be able to use `{:?}` (or {:#?} for pretty-print) instead +note: required by a bound in `std::error::Error` + --> $RUST/core/src/error.rs + | + | pub trait Error: Debug + Display { + | ^^^^^^^ required by this bound in `Error` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/missing-fmt.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-fmt.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/missing-fmt.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-fmt.rs diff --git 
a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/missing-fmt.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-fmt.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/missing-fmt.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/missing-fmt.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/no-display.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/no-display.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/no-display.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/no-display.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/no-display.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/no-display.stderr new file mode 100644 index 000000000000..88d0092678cd --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/no-display.stderr @@ -0,0 +1,20 @@ +error[E0599]: the method `as_display` exists for reference `&NoDisplay`, but its trait bounds were not satisfied + --> tests/ui/no-display.rs:7:9 + | +4 | struct NoDisplay; + | ---------------- doesn't satisfy `NoDisplay: std::fmt::Display` +... +7 | #[error("thread: {thread}")] + | ^^^^^^^^^^^^^^^^^^ method cannot be called on `&NoDisplay` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `NoDisplay: std::fmt::Display` + which is required by `&NoDisplay: AsDisplay<'_>` +note: the trait `std::fmt::Display` must be implemented + --> $RUST/core/src/fmt/mod.rs + | + | pub trait Display { + | ^^^^^^^^^^^^^^^^^ + = help: items from traits can only be used if the trait is implemented and in scope + = note: the following trait defines an item `as_display`, perhaps you need to implement it: + candidate #1: `AsDisplay` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-enum-not-error.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-not-error.rs similarity index 75% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-enum-not-error.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-not-error.rs index 3eb0d3e866e7..dae2285b830c 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-enum-not-error.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-not-error.rs @@ -6,9 +6,7 @@ pub struct NotError; #[derive(Error, Debug)] #[error("...")] pub enum ErrorEnum { - Broken { - source: NotError, - }, + Broken { source: NotError }, } fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-not-error.stderr new file mode 100644 index 000000000000..649d77df81a8 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-not-error.stderr @@ -0,0 +1,22 @@ +error[E0599]: the method `as_dyn_error` exists for reference `&NotError`, but its trait bounds were not satisfied + --> tests/ui/source-enum-not-error.rs:9:14 + | +4 | pub struct NotError; + | ------------------- doesn't satisfy `NotError: AsDynError<'_>` or `NotError: 
std::error::Error` +... +9 | Broken { source: NotError }, + | ^^^^^^ method cannot be called on `&NotError` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `NotError: std::error::Error` + which is required by `NotError: AsDynError<'_>` + `&NotError: std::error::Error` + which is required by `&NotError: AsDynError<'_>` +note: the trait `std::error::Error` must be implemented + --> $RUST/core/src/error.rs + | + | pub trait Error: Debug + Display { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: items from traits can only be used if the trait is implemented and in scope + = note: the following trait defines an item `as_dyn_error`, perhaps you need to implement it: + candidate #1: `AsDynError` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-unnamed-field-not-error.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-unnamed-field-not-error.rs new file mode 100644 index 000000000000..a877c2cd0fb9 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-unnamed-field-not-error.rs @@ -0,0 +1,12 @@ +use thiserror::Error; + +#[derive(Debug)] +pub struct NotError; + +#[derive(Error, Debug)] +#[error("...")] +pub enum ErrorEnum { + Broken(#[source] NotError), +} + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-unnamed-field-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-unnamed-field-not-error.stderr new file mode 100644 index 000000000000..a1fe2b5b5351 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-enum-unnamed-field-not-error.stderr @@ -0,0 +1,22 @@ +error[E0599]: the method `as_dyn_error` exists for reference `&NotError`, but its trait bounds were not satisfied + --> tests/ui/source-enum-unnamed-field-not-error.rs:9:14 + | +4 | pub struct NotError; + | ------------------- doesn't satisfy `NotError: AsDynError<'_>` or `NotError: std::error::Error` +... 
+9 | Broken(#[source] NotError), + | ^^^^^^ method cannot be called on `&NotError` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `NotError: std::error::Error` + which is required by `NotError: AsDynError<'_>` + `&NotError: std::error::Error` + which is required by `&NotError: AsDynError<'_>` +note: the trait `std::error::Error` must be implemented + --> $RUST/core/src/error.rs + | + | pub trait Error: Debug + Display { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: items from traits can only be used if the trait is implemented and in scope + = note: the following trait defines an item `as_dyn_error`, perhaps you need to implement it: + candidate #1: `AsDynError` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-struct-not-error.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-not-error.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/source-struct-not-error.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-not-error.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-not-error.stderr new file mode 100644 index 000000000000..07cd67ac6422 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-not-error.stderr @@ -0,0 +1,20 @@ +error[E0599]: the method `as_dyn_error` exists for struct `NotError`, but its trait bounds were not satisfied + --> tests/ui/source-struct-not-error.rs:9:5 + | +4 | struct NotError; + | --------------- method `as_dyn_error` not found for this struct because it doesn't satisfy `NotError: AsDynError<'_>` or `NotError: std::error::Error` +... 
+9 | source: NotError, + | ^^^^^^ method cannot be called on `NotError` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `NotError: std::error::Error` + which is required by `NotError: AsDynError<'_>` +note: the trait `std::error::Error` must be implemented + --> $RUST/core/src/error.rs + | + | pub trait Error: Debug + Display { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: items from traits can only be used if the trait is implemented and in scope + = note: the following trait defines an item `as_dyn_error`, perhaps you need to implement it: + candidate #1: `AsDynError` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-unnamed-field-not-error.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-unnamed-field-not-error.rs new file mode 100644 index 000000000000..160b6b247ff5 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-unnamed-field-not-error.rs @@ -0,0 +1,10 @@ +use thiserror::Error; + +#[derive(Debug)] +struct NotError; + +#[derive(Error, Debug)] +#[error("...")] +pub struct ErrorStruct(#[source] NotError); + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-unnamed-field-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-unnamed-field-not-error.stderr new file mode 100644 index 000000000000..2022ea67cd42 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/source-struct-unnamed-field-not-error.stderr @@ -0,0 +1,20 @@ +error[E0599]: the method `as_dyn_error` exists for struct `NotError`, but its trait bounds were not satisfied + --> tests/ui/source-struct-unnamed-field-not-error.rs:8:26 + | +4 | struct NotError; + | --------------- method `as_dyn_error` not found for this struct because it doesn't satisfy `NotError: AsDynError<'_>` or `NotError: std::error::Error` +... 
+8 | pub struct ErrorStruct(#[source] NotError); + | ^^^^^^ method cannot be called on `NotError` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `NotError: std::error::Error` + which is required by `NotError: AsDynError<'_>` +note: the trait `std::error::Error` must be implemented + --> $RUST/core/src/error.rs + | + | pub trait Error: Debug + Display { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = help: items from traits can only be used if the trait is implemented and in scope + = note: the following trait defines an item `as_dyn_error`, perhaps you need to implement it: + candidate #1: `AsDynError` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-display.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-display.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-display.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-display.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-display.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-display.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-display.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-display.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-enum-many.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-many.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-enum-many.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-many.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-enum-many.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-many.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-enum-many.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-many.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-not-error.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-not-error.rs new file mode 100644 index 000000000000..80ccfc973a8e --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-not-error.rs @@ -0,0 +1,9 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum Error { + #[error(transparent)] + Other { message: String }, +} + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-not-error.stderr new file mode 100644 index 000000000000..bb836d4e8d58 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-not-error.stderr @@ -0,0 +1,20 @@ +error[E0599]: the method `as_dyn_error` exists for reference `&String`, but its trait bounds were not satisfied + --> tests/ui/transparent-enum-not-error.rs:5:13 + | +5 | 
#[error(transparent)] + | ^^^^^^^^^^^ method cannot be called on `&String` due to unsatisfied trait bounds + | + ::: $RUST/alloc/src/string.rs + | + | pub struct String { + | ----------------- doesn't satisfy `String: AsDynError<'_>` or `String: std::error::Error` + | + = note: the following trait bounds were not satisfied: + `String: std::error::Error` + which is required by `String: AsDynError<'_>` + `&String: std::error::Error` + which is required by `&String: AsDynError<'_>` + `str: Sized` + which is required by `str: AsDynError<'_>` + `str: std::error::Error` + which is required by `str: AsDynError<'_>` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-enum-source.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-source.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-enum-source.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-source.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-enum-source.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-source.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-enum-source.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-source.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-unnamed-field-not-error.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-unnamed-field-not-error.rs new file mode 100644 index 000000000000..87c32e0b6cfb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-unnamed-field-not-error.rs @@ -0,0 +1,9 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum Error { + #[error(transparent)] + Other(String), +} + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-unnamed-field-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-unnamed-field-not-error.stderr new file mode 100644 index 000000000000..f337c592eebe --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-enum-unnamed-field-not-error.stderr @@ -0,0 +1,20 @@ +error[E0599]: the method `as_dyn_error` exists for reference `&String`, but its trait bounds were not satisfied + --> tests/ui/transparent-enum-unnamed-field-not-error.rs:5:13 + | +5 | #[error(transparent)] + | ^^^^^^^^^^^ method cannot be called on `&String` due to unsatisfied trait bounds + | + ::: $RUST/alloc/src/string.rs + | + | pub struct String { + | ----------------- doesn't satisfy `String: AsDynError<'_>` or `String: std::error::Error` + | + = note: the following trait bounds were not satisfied: + `String: std::error::Error` + which is required by `String: AsDynError<'_>` + `&String: std::error::Error` + which is required by `&String: AsDynError<'_>` + `str: Sized` + which is required by `str: AsDynError<'_>` + `str: std::error::Error` + which is required by `str: AsDynError<'_>` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-struct-many.rs 
b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-many.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-struct-many.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-many.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-struct-many.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-many.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-struct-many.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-many.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-not-error.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-not-error.rs new file mode 100644 index 000000000000..811ff539589f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-not-error.rs @@ -0,0 +1,9 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +#[error(transparent)] +pub struct Error { + message: String, +} + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-not-error.stderr new file mode 100644 index 000000000000..ee50d03a7b20 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-not-error.stderr @@ -0,0 +1,18 @@ +error[E0599]: the method `as_dyn_error` exists for struct `String`, but its trait bounds were not satisfied + --> tests/ui/transparent-struct-not-error.rs:4:9 + | +4 | #[error(transparent)] + | ^^^^^^^^^^^ method cannot be called on `String` due to unsatisfied trait bounds + | + ::: $RUST/alloc/src/string.rs + | + | pub struct String { + | ----------------- doesn't satisfy `String: AsDynError<'_>` or `String: std::error::Error` + | + = note: the following trait bounds were not satisfied: + `String: std::error::Error` + which is required by `String: AsDynError<'_>` + `str: Sized` + which is required by `str: AsDynError<'_>` + `str: std::error::Error` + which is required by `str: AsDynError<'_>` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-struct-source.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-source.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-struct-source.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-source.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-struct-source.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-source.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/transparent-struct-source.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-source.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-unnamed-field-not-error.rs 
b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-unnamed-field-not-error.rs new file mode 100644 index 000000000000..b4f7fbbfd5e2 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-unnamed-field-not-error.rs @@ -0,0 +1,7 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +#[error(transparent)] +pub struct Error(String); + +fn main() {} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-unnamed-field-not-error.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-unnamed-field-not-error.stderr new file mode 100644 index 000000000000..c3d6c0023d9d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/transparent-struct-unnamed-field-not-error.stderr @@ -0,0 +1,18 @@ +error[E0599]: the method `as_dyn_error` exists for struct `String`, but its trait bounds were not satisfied + --> tests/ui/transparent-struct-unnamed-field-not-error.rs:4:9 + | +4 | #[error(transparent)] + | ^^^^^^^^^^^ method cannot be called on `String` due to unsatisfied trait bounds + | + ::: $RUST/alloc/src/string.rs + | + | pub struct String { + | ----------------- doesn't satisfy `String: AsDynError<'_>` or `String: std::error::Error` + | + = note: the following trait bounds were not satisfied: + `String: std::error::Error` + which is required by `String: AsDynError<'_>` + `str: Sized` + which is required by `str: AsDynError<'_>` + `str: std::error::Error` + which is required by `str: AsDynError<'_>` diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/unexpected-field-fmt.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/unexpected-field-fmt.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/unexpected-field-fmt.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/unexpected-field-fmt.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/unexpected-field-fmt.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/unexpected-field-fmt.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/unexpected-field-fmt.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/unexpected-field-fmt.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/unexpected-struct-source.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/unexpected-struct-source.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/unexpected-struct-source.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/unexpected-struct-source.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/unexpected-struct-source.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/unexpected-struct-source.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/unexpected-struct-source.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/unexpected-struct-source.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/union.rs 
b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/union.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/union.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/union.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/union.stderr b/third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/union.stderr similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-1.0.38/tests/ui/union.stderr rename to third_party/rust/chromium_crates_io/vendor/thiserror-1.0.65/tests/ui/union.stderr diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/.cargo_vcs_info.json deleted file mode 100644 index 9f6267f3b1f8..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "74bfe75eb25ba9d39b0ae5b570d611855cbc5086" - }, - "path_in_vcs": "impl" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/Cargo.toml deleted file mode 100644 index ca2a0b4f4de9..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. 
- -[package] -edition = "2018" -rust-version = "1.31" -name = "thiserror-impl" -version = "1.0.38" -authors = ["David Tolnay "] -description = "Implementation detail of the `thiserror` crate" -license = "MIT OR Apache-2.0" -repository = "https://github.com/dtolnay/thiserror" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[lib] -proc-macro = true - -[dependencies.proc-macro2] -version = "1.0" - -[dependencies.quote] -version = "1.0" - -[dependencies.syn] -version = "1.0.45" diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/Cargo.toml.orig deleted file mode 100644 index dee506a5755d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/Cargo.toml.orig +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "thiserror-impl" -version = "1.0.38" -authors = ["David Tolnay "] -description = "Implementation detail of the `thiserror` crate" -edition = "2018" -license = "MIT OR Apache-2.0" -repository = "https://github.com/dtolnay/thiserror" -rust-version = "1.31" - -[lib] -proc-macro = true - -[dependencies] -proc-macro2 = "1.0" -quote = "1.0" -syn = "1.0.45" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/attr.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/attr.rs deleted file mode 100644 index 9963fd6dbfff..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/attr.rs +++ /dev/null @@ -1,213 +0,0 @@ -use proc_macro2::{Delimiter, Group, Span, TokenStream, TokenTree}; -use quote::{format_ident, quote, ToTokens}; -use std::collections::BTreeSet as Set; -use std::iter::FromIterator; -use syn::parse::{Nothing, ParseStream}; -use syn::{ - braced, bracketed, parenthesized, token, Attribute, Error, Ident, Index, LitInt, LitStr, - Result, Token, -}; - -pub struct Attrs<'a> { - pub display: Option>, - pub source: Option<&'a Attribute>, - pub backtrace: Option<&'a Attribute>, - pub from: Option<&'a Attribute>, - pub transparent: Option>, -} - -#[derive(Clone)] -pub struct Display<'a> { - pub original: &'a Attribute, - pub fmt: LitStr, - pub args: TokenStream, - pub has_bonus_display: bool, - pub implied_bounds: Set<(usize, Trait)>, -} - -#[derive(Copy, Clone)] -pub struct Transparent<'a> { - pub original: &'a Attribute, - pub span: Span, -} - -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] -pub enum Trait { - Debug, - Display, - Octal, - LowerHex, - UpperHex, - Pointer, - Binary, - LowerExp, - UpperExp, -} - -pub fn get(input: &[Attribute]) -> Result { - let mut attrs = Attrs { - display: None, - source: None, - backtrace: None, - from: None, - transparent: None, - }; - - for attr in input { - if attr.path.is_ident("error") { - parse_error_attribute(&mut attrs, attr)?; - } else if attr.path.is_ident("source") { - require_empty_attribute(attr)?; - if attrs.source.is_some() { - return Err(Error::new_spanned(attr, "duplicate #[source] attribute")); - } - attrs.source = Some(attr); - } else if attr.path.is_ident("backtrace") { - require_empty_attribute(attr)?; - if attrs.backtrace.is_some() { - return Err(Error::new_spanned(attr, "duplicate #[backtrace] attribute")); - } - attrs.backtrace = Some(attr); - } else if attr.path.is_ident("from") { - if !attr.tokens.is_empty() { - // Assume this is meant for derive_more crate or something. 
- continue; - } - if attrs.from.is_some() { - return Err(Error::new_spanned(attr, "duplicate #[from] attribute")); - } - attrs.from = Some(attr); - } - } - - Ok(attrs) -} - -fn parse_error_attribute<'a>(attrs: &mut Attrs<'a>, attr: &'a Attribute) -> Result<()> { - syn::custom_keyword!(transparent); - - attr.parse_args_with(|input: ParseStream| { - if let Some(kw) = input.parse::>()? { - if attrs.transparent.is_some() { - return Err(Error::new_spanned( - attr, - "duplicate #[error(transparent)] attribute", - )); - } - attrs.transparent = Some(Transparent { - original: attr, - span: kw.span, - }); - return Ok(()); - } - - let display = Display { - original: attr, - fmt: input.parse()?, - args: parse_token_expr(input, false)?, - has_bonus_display: false, - implied_bounds: Set::new(), - }; - if attrs.display.is_some() { - return Err(Error::new_spanned( - attr, - "only one #[error(...)] attribute is allowed", - )); - } - attrs.display = Some(display); - Ok(()) - }) -} - -fn parse_token_expr(input: ParseStream, mut begin_expr: bool) -> Result { - let mut tokens = Vec::new(); - while !input.is_empty() { - if begin_expr && input.peek(Token![.]) { - if input.peek2(Ident) { - input.parse::()?; - begin_expr = false; - continue; - } - if input.peek2(LitInt) { - input.parse::()?; - let int: Index = input.parse()?; - let ident = format_ident!("_{}", int.index, span = int.span); - tokens.push(TokenTree::Ident(ident)); - begin_expr = false; - continue; - } - } - - begin_expr = input.peek(Token![break]) - || input.peek(Token![continue]) - || input.peek(Token![if]) - || input.peek(Token![in]) - || input.peek(Token![match]) - || input.peek(Token![mut]) - || input.peek(Token![return]) - || input.peek(Token![while]) - || input.peek(Token![+]) - || input.peek(Token![&]) - || input.peek(Token![!]) - || input.peek(Token![^]) - || input.peek(Token![,]) - || input.peek(Token![/]) - || input.peek(Token![=]) - || input.peek(Token![>]) - || input.peek(Token![<]) - || input.peek(Token![|]) - || input.peek(Token![%]) - || input.peek(Token![;]) - || input.peek(Token![*]) - || input.peek(Token![-]); - - let token: TokenTree = if input.peek(token::Paren) { - let content; - let delimiter = parenthesized!(content in input); - let nested = parse_token_expr(&content, true)?; - let mut group = Group::new(Delimiter::Parenthesis, nested); - group.set_span(delimiter.span); - TokenTree::Group(group) - } else if input.peek(token::Brace) { - let content; - let delimiter = braced!(content in input); - let nested = parse_token_expr(&content, true)?; - let mut group = Group::new(Delimiter::Brace, nested); - group.set_span(delimiter.span); - TokenTree::Group(group) - } else if input.peek(token::Bracket) { - let content; - let delimiter = bracketed!(content in input); - let nested = parse_token_expr(&content, true)?; - let mut group = Group::new(Delimiter::Bracket, nested); - group.set_span(delimiter.span); - TokenTree::Group(group) - } else { - input.parse()? - }; - tokens.push(token); - } - Ok(TokenStream::from_iter(tokens)) -} - -fn require_empty_attribute(attr: &Attribute) -> Result<()> { - syn::parse2::(attr.tokens.clone())?; - Ok(()) -} - -impl ToTokens for Display<'_> { - fn to_tokens(&self, tokens: &mut TokenStream) { - let fmt = &self.fmt; - let args = &self.args; - tokens.extend(quote! 
{ - write!(__formatter, #fmt #args) - }); - } -} - -impl ToTokens for Trait { - fn to_tokens(&self, tokens: &mut TokenStream) { - let trait_name = format_ident!("{}", format!("{:?}", self)); - tokens.extend(quote!(std::fmt::#trait_name)); - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/expand.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/expand.rs deleted file mode 100644 index 43522096a1ed..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/expand.rs +++ /dev/null @@ -1,546 +0,0 @@ -use crate::ast::{Enum, Field, Input, Struct}; -use crate::attr::Trait; -use crate::generics::InferredBounds; -use proc_macro2::TokenStream; -use quote::{format_ident, quote, quote_spanned, ToTokens}; -use std::collections::BTreeSet as Set; -use syn::spanned::Spanned; -use syn::{ - Data, DeriveInput, GenericArgument, Member, PathArguments, Result, Token, Type, Visibility, -}; - -pub fn derive(node: &DeriveInput) -> Result { - let input = Input::from_syn(node)?; - input.validate()?; - Ok(match input { - Input::Struct(input) => impl_struct(input), - Input::Enum(input) => impl_enum(input), - }) -} - -fn impl_struct(input: Struct) -> TokenStream { - let ty = &input.ident; - let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); - let mut error_inferred_bounds = InferredBounds::new(); - - let source_body = if input.attrs.transparent.is_some() { - let only_field = &input.fields[0]; - if only_field.contains_generic { - error_inferred_bounds.insert(only_field.ty, quote!(std::error::Error)); - } - let member = &only_field.member; - Some(quote! { - std::error::Error::source(self.#member.as_dyn_error()) - }) - } else if let Some(source_field) = input.source_field() { - let source = &source_field.member; - if source_field.contains_generic { - let ty = unoptional_type(source_field.ty); - error_inferred_bounds.insert(ty, quote!(std::error::Error + 'static)); - } - let asref = if type_is_option(source_field.ty) { - Some(quote_spanned!(source.span()=> .as_ref()?)) - } else { - None - }; - let dyn_error = quote_spanned!(source.span()=> self.#source #asref.as_dyn_error()); - Some(quote! { - std::option::Option::Some(#dyn_error) - }) - } else { - None - }; - let source_method = source_body.map(|body| { - quote! { - fn source(&self) -> std::option::Option<&(dyn std::error::Error + 'static)> { - use thiserror::__private::AsDynError; - #body - } - } - }); - - let provide_method = input.backtrace_field().map(|backtrace_field| { - let demand = quote!(demand); - let backtrace = &backtrace_field.member; - let body = if let Some(source_field) = input.source_field() { - let source = &source_field.member; - let source_provide = if type_is_option(source_field.ty) { - quote_spanned! {source.span()=> - if let std::option::Option::Some(source) = &self.#source { - source.thiserror_provide(#demand); - } - } - } else { - quote_spanned! {source.span()=> - self.#source.thiserror_provide(#demand); - } - }; - let self_provide = if source == backtrace { - None - } else if type_is_option(backtrace_field.ty) { - Some(quote! { - if let std::option::Option::Some(backtrace) = &self.#backtrace { - #demand.provide_ref::(backtrace); - } - }) - } else { - Some(quote! { - #demand.provide_ref::(&self.#backtrace); - }) - }; - quote! { - use thiserror::__private::ThiserrorProvide; - #source_provide - #self_provide - } - } else if type_is_option(backtrace_field.ty) { - quote! 
{ - if let std::option::Option::Some(backtrace) = &self.#backtrace { - #demand.provide_ref::(backtrace); - } - } - } else { - quote! { - #demand.provide_ref::(&self.#backtrace); - } - }; - quote! { - fn provide<'_demand>(&'_demand self, #demand: &mut std::any::Demand<'_demand>) { - #body - } - } - }); - - let mut display_implied_bounds = Set::new(); - let display_body = if input.attrs.transparent.is_some() { - let only_field = &input.fields[0].member; - display_implied_bounds.insert((0, Trait::Display)); - Some(quote! { - std::fmt::Display::fmt(&self.#only_field, __formatter) - }) - } else if let Some(display) = &input.attrs.display { - display_implied_bounds = display.implied_bounds.clone(); - let use_as_display = if display.has_bonus_display { - Some(quote! { - #[allow(unused_imports)] - use thiserror::__private::{DisplayAsDisplay, PathAsDisplay}; - }) - } else { - None - }; - let pat = fields_pat(&input.fields); - Some(quote! { - #use_as_display - #[allow(unused_variables, deprecated)] - let Self #pat = self; - #display - }) - } else { - None - }; - let display_impl = display_body.map(|body| { - let mut display_inferred_bounds = InferredBounds::new(); - for (field, bound) in display_implied_bounds { - let field = &input.fields[field]; - if field.contains_generic { - display_inferred_bounds.insert(field.ty, bound); - } - } - let display_where_clause = display_inferred_bounds.augment_where_clause(input.generics); - quote! { - #[allow(unused_qualifications)] - impl #impl_generics std::fmt::Display for #ty #ty_generics #display_where_clause { - #[allow(clippy::used_underscore_binding)] - fn fmt(&self, __formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - #body - } - } - } - }); - - let from_impl = input.from_field().map(|from_field| { - let backtrace_field = input.distinct_backtrace_field(); - let from = unoptional_type(from_field.ty); - let body = from_initializer(from_field, backtrace_field); - quote! { - #[allow(unused_qualifications)] - impl #impl_generics std::convert::From<#from> for #ty #ty_generics #where_clause { - #[allow(deprecated)] - fn from(source: #from) -> Self { - #ty #body - } - } - } - }); - - let error_trait = spanned_error_trait(input.original); - if input.generics.type_params().next().is_some() { - let self_token = ::default(); - error_inferred_bounds.insert(self_token, Trait::Debug); - error_inferred_bounds.insert(self_token, Trait::Display); - } - let error_where_clause = error_inferred_bounds.augment_where_clause(input.generics); - - quote! { - #[allow(unused_qualifications)] - impl #impl_generics #error_trait for #ty #ty_generics #error_where_clause { - #source_method - #provide_method - } - #display_impl - #from_impl - } -} - -fn impl_enum(input: Enum) -> TokenStream { - let ty = &input.ident; - let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); - let mut error_inferred_bounds = InferredBounds::new(); - - let source_method = if input.has_source() { - let arms = input.variants.iter().map(|variant| { - let ident = &variant.ident; - if variant.attrs.transparent.is_some() { - let only_field = &variant.fields[0]; - if only_field.contains_generic { - error_inferred_bounds.insert(only_field.ty, quote!(std::error::Error)); - } - let member = &only_field.member; - let source = quote!(std::error::Error::source(transparent.as_dyn_error())); - quote! 
{ - #ty::#ident {#member: transparent} => #source, - } - } else if let Some(source_field) = variant.source_field() { - let source = &source_field.member; - if source_field.contains_generic { - let ty = unoptional_type(source_field.ty); - error_inferred_bounds.insert(ty, quote!(std::error::Error + 'static)); - } - let asref = if type_is_option(source_field.ty) { - Some(quote_spanned!(source.span()=> .as_ref()?)) - } else { - None - }; - let varsource = quote!(source); - let dyn_error = quote_spanned!(source.span()=> #varsource #asref.as_dyn_error()); - quote! { - #ty::#ident {#source: #varsource, ..} => std::option::Option::Some(#dyn_error), - } - } else { - quote! { - #ty::#ident {..} => std::option::Option::None, - } - } - }); - Some(quote! { - fn source(&self) -> std::option::Option<&(dyn std::error::Error + 'static)> { - use thiserror::__private::AsDynError; - #[allow(deprecated)] - match self { - #(#arms)* - } - } - }) - } else { - None - }; - - let provide_method = if input.has_backtrace() { - let demand = quote!(demand); - let arms = input.variants.iter().map(|variant| { - let ident = &variant.ident; - match (variant.backtrace_field(), variant.source_field()) { - (Some(backtrace_field), Some(source_field)) - if backtrace_field.attrs.backtrace.is_none() => - { - let backtrace = &backtrace_field.member; - let source = &source_field.member; - let varsource = quote!(source); - let source_provide = if type_is_option(source_field.ty) { - quote_spanned! {source.span()=> - if let std::option::Option::Some(source) = #varsource { - source.thiserror_provide(#demand); - } - } - } else { - quote_spanned! {source.span()=> - #varsource.thiserror_provide(#demand); - } - }; - let self_provide = if type_is_option(backtrace_field.ty) { - quote! { - if let std::option::Option::Some(backtrace) = backtrace { - #demand.provide_ref::(backtrace); - } - } - } else { - quote! { - #demand.provide_ref::(backtrace); - } - }; - quote! { - #ty::#ident { - #backtrace: backtrace, - #source: #varsource, - .. - } => { - use thiserror::__private::ThiserrorProvide; - #source_provide - #self_provide - } - } - } - (Some(backtrace_field), Some(source_field)) - if backtrace_field.member == source_field.member => - { - let backtrace = &backtrace_field.member; - let varsource = quote!(source); - let source_provide = if type_is_option(source_field.ty) { - quote_spanned! {backtrace.span()=> - if let std::option::Option::Some(source) = #varsource { - source.thiserror_provide(#demand); - } - } - } else { - quote_spanned! {backtrace.span()=> - #varsource.thiserror_provide(#demand); - } - }; - quote! { - #ty::#ident {#backtrace: #varsource, ..} => { - use thiserror::__private::ThiserrorProvide; - #source_provide - } - } - } - (Some(backtrace_field), _) => { - let backtrace = &backtrace_field.member; - let body = if type_is_option(backtrace_field.ty) { - quote! { - if let std::option::Option::Some(backtrace) = backtrace { - #demand.provide_ref::(backtrace); - } - } - } else { - quote! { - #demand.provide_ref::(backtrace); - } - }; - quote! { - #ty::#ident {#backtrace: backtrace, ..} => { - #body - } - } - } - (None, _) => quote! { - #ty::#ident {..} => {} - }, - } - }); - Some(quote! 
{ - fn provide<'_demand>(&'_demand self, #demand: &mut std::any::Demand<'_demand>) { - #[allow(deprecated)] - match self { - #(#arms)* - } - } - }) - } else { - None - }; - - let display_impl = if input.has_display() { - let mut display_inferred_bounds = InferredBounds::new(); - let use_as_display = if input.variants.iter().any(|v| { - v.attrs - .display - .as_ref() - .map_or(false, |display| display.has_bonus_display) - }) { - Some(quote! { - #[allow(unused_imports)] - use thiserror::__private::{DisplayAsDisplay, PathAsDisplay}; - }) - } else { - None - }; - let void_deref = if input.variants.is_empty() { - Some(quote!(*)) - } else { - None - }; - let arms = input.variants.iter().map(|variant| { - let mut display_implied_bounds = Set::new(); - let display = match &variant.attrs.display { - Some(display) => { - display_implied_bounds = display.implied_bounds.clone(); - display.to_token_stream() - } - None => { - let only_field = match &variant.fields[0].member { - Member::Named(ident) => ident.clone(), - Member::Unnamed(index) => format_ident!("_{}", index), - }; - display_implied_bounds.insert((0, Trait::Display)); - quote!(std::fmt::Display::fmt(#only_field, __formatter)) - } - }; - for (field, bound) in display_implied_bounds { - let field = &variant.fields[field]; - if field.contains_generic { - display_inferred_bounds.insert(field.ty, bound); - } - } - let ident = &variant.ident; - let pat = fields_pat(&variant.fields); - quote! { - #ty::#ident #pat => #display - } - }); - let arms = arms.collect::>(); - let display_where_clause = display_inferred_bounds.augment_where_clause(input.generics); - Some(quote! { - #[allow(unused_qualifications)] - impl #impl_generics std::fmt::Display for #ty #ty_generics #display_where_clause { - fn fmt(&self, __formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - #use_as_display - #[allow(unused_variables, deprecated, clippy::used_underscore_binding)] - match #void_deref self { - #(#arms,)* - } - } - } - }) - } else { - None - }; - - let from_impls = input.variants.iter().filter_map(|variant| { - let from_field = variant.from_field()?; - let backtrace_field = variant.distinct_backtrace_field(); - let variant = &variant.ident; - let from = unoptional_type(from_field.ty); - let body = from_initializer(from_field, backtrace_field); - Some(quote! { - #[allow(unused_qualifications)] - impl #impl_generics std::convert::From<#from> for #ty #ty_generics #where_clause { - #[allow(deprecated)] - fn from(source: #from) -> Self { - #ty::#variant #body - } - } - }) - }); - - let error_trait = spanned_error_trait(input.original); - if input.generics.type_params().next().is_some() { - let self_token = ::default(); - error_inferred_bounds.insert(self_token, Trait::Debug); - error_inferred_bounds.insert(self_token, Trait::Display); - } - let error_where_clause = error_inferred_bounds.augment_where_clause(input.generics); - - quote! 
{ - #[allow(unused_qualifications)] - impl #impl_generics #error_trait for #ty #ty_generics #error_where_clause { - #source_method - #provide_method - } - #display_impl - #(#from_impls)* - } -} - -fn fields_pat(fields: &[Field]) -> TokenStream { - let mut members = fields.iter().map(|field| &field.member).peekable(); - match members.peek() { - Some(Member::Named(_)) => quote!({ #(#members),* }), - Some(Member::Unnamed(_)) => { - let vars = members.map(|member| match member { - Member::Unnamed(member) => format_ident!("_{}", member), - Member::Named(_) => unreachable!(), - }); - quote!((#(#vars),*)) - } - None => quote!({}), - } -} - -fn from_initializer(from_field: &Field, backtrace_field: Option<&Field>) -> TokenStream { - let from_member = &from_field.member; - let some_source = if type_is_option(from_field.ty) { - quote!(std::option::Option::Some(source)) - } else { - quote!(source) - }; - let backtrace = backtrace_field.map(|backtrace_field| { - let backtrace_member = &backtrace_field.member; - if type_is_option(backtrace_field.ty) { - quote! { - #backtrace_member: std::option::Option::Some(std::backtrace::Backtrace::capture()), - } - } else { - quote! { - #backtrace_member: std::convert::From::from(std::backtrace::Backtrace::capture()), - } - } - }); - quote!({ - #from_member: #some_source, - #backtrace - }) -} - -fn type_is_option(ty: &Type) -> bool { - type_parameter_of_option(ty).is_some() -} - -fn unoptional_type(ty: &Type) -> TokenStream { - let unoptional = type_parameter_of_option(ty).unwrap_or(ty); - quote!(#unoptional) -} - -fn type_parameter_of_option(ty: &Type) -> Option<&Type> { - let path = match ty { - Type::Path(ty) => &ty.path, - _ => return None, - }; - - let last = path.segments.last().unwrap(); - if last.ident != "Option" { - return None; - } - - let bracketed = match &last.arguments { - PathArguments::AngleBracketed(bracketed) => bracketed, - _ => return None, - }; - - if bracketed.args.len() != 1 { - return None; - } - - match &bracketed.args[0] { - GenericArgument::Type(arg) => Some(arg), - _ => None, - } -} - -fn spanned_error_trait(input: &DeriveInput) -> TokenStream { - let vis_span = match &input.vis { - Visibility::Public(vis) => Some(vis.pub_token.span()), - Visibility::Crate(vis) => Some(vis.crate_token.span()), - Visibility::Restricted(vis) => Some(vis.pub_token.span()), - Visibility::Inherited => None, - }; - let data_span = match &input.data { - Data::Struct(data) => data.struct_token.span(), - Data::Enum(data) => data.enum_token.span(), - Data::Union(data) => data.union_token.span(), - }; - let first_span = vis_span.unwrap_or(data_span); - let last_span = input.ident.span(); - let path = quote_spanned!(first_span=> std::error::); - let error = quote_spanned!(last_span=> Error); - quote!(#path #error) -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/lib.rs deleted file mode 100644 index f0fc96917065..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/lib.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![allow( - clippy::blocks_in_if_conditions, - clippy::cast_lossless, - clippy::cast_possible_truncation, - clippy::manual_find, - clippy::manual_map, - clippy::map_unwrap_or, - clippy::needless_pass_by_value, - clippy::option_if_let_else, - clippy::range_plus_one, - clippy::single_match_else, - clippy::too_many_lines, - clippy::wrong_self_convention -)] - -extern crate proc_macro; - -mod ast; -mod attr; -mod expand; 
-mod fmt; -mod generics; -mod prop; -mod valid; - -use proc_macro::TokenStream; -use syn::{parse_macro_input, DeriveInput}; - -#[proc_macro_derive(Error, attributes(backtrace, error, from, source))] -pub fn derive_error(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - expand::derive(&input) - .unwrap_or_else(|err| err.to_compile_error()) - .into() -} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/.cargo-checksum.json b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/.cargo-checksum.json new file mode 100644 index 000000000000..697c9ce2fbb4 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{}} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/.cargo_vcs_info.json new file mode 100644 index 000000000000..1c6de130c8c3 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/.cargo_vcs_info.json @@ -0,0 +1,6 @@ +{ + "git": { + "sha1": "5088592a4efb6a5c40b4d869eb1a0e2eacf622cb" + }, + "path_in_vcs": "impl" +} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/Cargo.toml new file mode 100644 index 000000000000..d5dc0bc5895b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/Cargo.toml @@ -0,0 +1,45 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.56" +name = "thiserror-impl" +version = "1.0.65" +authors = ["David Tolnay "] +build = false +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Implementation detail of the `thiserror` crate" +readme = false +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/thiserror" + +[package.metadata.docs.rs] +rustdoc-args = ["--generate-link-to-definition"] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] +name = "thiserror_impl" +path = "src/lib.rs" +proc-macro = true + +[dependencies.proc-macro2] +version = "1.0.74" + +[dependencies.quote] +version = "1.0.35" + +[dependencies.syn] +version = "2.0.46" diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/Cargo.toml.orig new file mode 100644 index 000000000000..fc0a53beed7a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/Cargo.toml.orig @@ -0,0 +1,21 @@ +[package] +name = "thiserror-impl" +version = "1.0.65" +authors = ["David Tolnay "] +description = "Implementation detail of the `thiserror` crate" +edition = "2021" +license = "MIT OR Apache-2.0" +repository = "https://github.com/dtolnay/thiserror" +rust-version = "1.56" + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "1.0.74" +quote = "1.0.35" +syn = "2.0.46" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] +rustdoc-args = ["--generate-link-to-definition"] diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/LICENSE-APACHE b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/LICENSE-APACHE new file mode 100644 index 000000000000..1b5ec8b78e23 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/LICENSE-MIT similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/LICENSE-MIT rename to third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/LICENSE-MIT diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/ast.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/ast.rs similarity index 96% rename from third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/ast.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/ast.rs index 2aa7246c82d0..4739d58beb34 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/ast.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/ast.rs @@ -12,7 +12,6 @@ pub enum Input<'a> { } pub struct Struct<'a> { - pub original: &'a DeriveInput, pub attrs: Attrs<'a>, pub ident: Ident, pub generics: &'a Generics, @@ -20,7 +19,6 @@ pub struct Struct<'a> { } pub struct Enum<'a> { - pub original: &'a DeriveInput, pub attrs: Attrs<'a>, pub ident: Ident, pub generics: &'a Generics, @@ -65,7 +63,6 @@ impl<'a> Struct<'a> { display.expand_shorthand(&fields); } Ok(Struct { - original: node, attrs, ident: node.ident.clone(), generics: &node.generics, @@ -85,7 +82,7 @@ impl<'a> Enum<'a> { .map(|node| { let mut variant = Variant::from_syn(node, &scope, span)?; if let display @ None = &mut variant.attrs.display { - *display = attrs.display.clone(); + display.clone_from(&attrs.display); } if let Some(display) = &mut variant.attrs.display { display.expand_shorthand(&variant.fields); @@ -96,7 +93,6 @@ impl<'a> Enum<'a> { }) .collect::>()?; Ok(Enum { - original: node, attrs, ident: node.ident.clone(), generics: &node.generics, diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/attr.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/attr.rs new file mode 100644 index 000000000000..e0ac02b1358d --- /dev/null +++ 
b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/attr.rs @@ -0,0 +1,275 @@ +use proc_macro2::{Delimiter, Group, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; +use quote::{format_ident, quote, ToTokens}; +use std::collections::BTreeSet as Set; +use syn::parse::discouraged::Speculative; +use syn::parse::ParseStream; +use syn::{ + braced, bracketed, parenthesized, token, Attribute, Error, Ident, Index, LitFloat, LitInt, + LitStr, Meta, Result, Token, +}; + +pub struct Attrs<'a> { + pub display: Option<Display<'a>>, + pub source: Option<&'a Attribute>, + pub backtrace: Option<&'a Attribute>, + pub from: Option<&'a Attribute>, + pub transparent: Option<Transparent<'a>>, +} + +#[derive(Clone)] +pub struct Display<'a> { + pub original: &'a Attribute, + pub fmt: LitStr, + pub args: TokenStream, + pub requires_fmt_machinery: bool, + pub has_bonus_display: bool, + pub implied_bounds: Set<(usize, Trait)>, +} + +#[derive(Copy, Clone)] +pub struct Transparent<'a> { + pub original: &'a Attribute, + pub span: Span, +} + +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] +pub enum Trait { + Debug, + Display, + Octal, + LowerHex, + UpperHex, + Pointer, + Binary, + LowerExp, + UpperExp, +} + +pub fn get(input: &[Attribute]) -> Result<Attrs> { + let mut attrs = Attrs { + display: None, + source: None, + backtrace: None, + from: None, + transparent: None, + }; + + for attr in input { + if attr.path().is_ident("error") { + parse_error_attribute(&mut attrs, attr)?; + } else if attr.path().is_ident("source") { + attr.meta.require_path_only()?; + if attrs.source.is_some() { + return Err(Error::new_spanned(attr, "duplicate #[source] attribute")); + } + attrs.source = Some(attr); + } else if attr.path().is_ident("backtrace") { + attr.meta.require_path_only()?; + if attrs.backtrace.is_some() { + return Err(Error::new_spanned(attr, "duplicate #[backtrace] attribute")); + } + attrs.backtrace = Some(attr); + } else if attr.path().is_ident("from") { + match attr.meta { + Meta::Path(_) => {} + Meta::List(_) | Meta::NameValue(_) => { + // Assume this is meant for derive_more crate or something. + continue; + } + } + if attrs.from.is_some() { + return Err(Error::new_spanned(attr, "duplicate #[from] attribute")); + } + attrs.from = Some(attr); + } + } + + Ok(attrs) +} + +fn parse_error_attribute<'a>(attrs: &mut Attrs<'a>, attr: &'a Attribute) -> Result<()> { + syn::custom_keyword!(transparent); + + attr.parse_args_with(|input: ParseStream| { + if let Some(kw) = input.parse::<Option<transparent>>()? { + if attrs.transparent.is_some() { + return Err(Error::new_spanned( + attr, + "duplicate #[error(transparent)] attribute", + )); + } + attrs.transparent = Some(Transparent { + original: attr, + span: kw.span, + }); + return Ok(()); + } + + let fmt: LitStr = input.parse()?; + + let ahead = input.fork(); + ahead.parse::<Option<Token![,]>>()?; + let args = if ahead.is_empty() { + input.advance_to(&ahead); + TokenStream::new() + } else { + parse_token_expr(input, false)?
+ }; + + let requires_fmt_machinery = !args.is_empty(); + + let display = Display { + original: attr, + fmt, + args, + requires_fmt_machinery, + has_bonus_display: false, + implied_bounds: Set::new(), + }; + if attrs.display.is_some() { + return Err(Error::new_spanned( + attr, + "only one #[error(...)] attribute is allowed", + )); + } + attrs.display = Some(display); + Ok(()) + }) +} + +fn parse_token_expr(input: ParseStream, mut begin_expr: bool) -> Result { + let mut tokens = Vec::new(); + while !input.is_empty() { + if begin_expr && input.peek(Token![.]) { + if input.peek2(Ident) { + input.parse::()?; + begin_expr = false; + continue; + } else if input.peek2(LitInt) { + input.parse::()?; + let int: Index = input.parse()?; + tokens.push({ + let ident = format_ident!("_{}", int.index, span = int.span); + TokenTree::Ident(ident) + }); + begin_expr = false; + continue; + } else if input.peek2(LitFloat) { + let ahead = input.fork(); + ahead.parse::()?; + let float: LitFloat = ahead.parse()?; + let repr = float.to_string(); + let mut indices = repr.split('.').map(syn::parse_str::); + if let (Some(Ok(first)), Some(Ok(second)), None) = + (indices.next(), indices.next(), indices.next()) + { + input.advance_to(&ahead); + tokens.push({ + let ident = format_ident!("_{}", first, span = float.span()); + TokenTree::Ident(ident) + }); + tokens.push({ + let mut punct = Punct::new('.', Spacing::Alone); + punct.set_span(float.span()); + TokenTree::Punct(punct) + }); + tokens.push({ + let mut literal = Literal::u32_unsuffixed(second.index); + literal.set_span(float.span()); + TokenTree::Literal(literal) + }); + begin_expr = false; + continue; + } + } + } + + begin_expr = input.peek(Token![break]) + || input.peek(Token![continue]) + || input.peek(Token![if]) + || input.peek(Token![in]) + || input.peek(Token![match]) + || input.peek(Token![mut]) + || input.peek(Token![return]) + || input.peek(Token![while]) + || input.peek(Token![+]) + || input.peek(Token![&]) + || input.peek(Token![!]) + || input.peek(Token![^]) + || input.peek(Token![,]) + || input.peek(Token![/]) + || input.peek(Token![=]) + || input.peek(Token![>]) + || input.peek(Token![<]) + || input.peek(Token![|]) + || input.peek(Token![%]) + || input.peek(Token![;]) + || input.peek(Token![*]) + || input.peek(Token![-]); + + let token: TokenTree = if input.peek(token::Paren) { + let content; + let delimiter = parenthesized!(content in input); + let nested = parse_token_expr(&content, true)?; + let mut group = Group::new(Delimiter::Parenthesis, nested); + group.set_span(delimiter.span.join()); + TokenTree::Group(group) + } else if input.peek(token::Brace) { + let content; + let delimiter = braced!(content in input); + let nested = parse_token_expr(&content, true)?; + let mut group = Group::new(Delimiter::Brace, nested); + group.set_span(delimiter.span.join()); + TokenTree::Group(group) + } else if input.peek(token::Bracket) { + let content; + let delimiter = bracketed!(content in input); + let nested = parse_token_expr(&content, true)?; + let mut group = Group::new(Delimiter::Bracket, nested); + group.set_span(delimiter.span.join()); + TokenTree::Group(group) + } else { + input.parse()? + }; + tokens.push(token); + } + Ok(TokenStream::from_iter(tokens)) +} + +impl ToTokens for Display<'_> { + fn to_tokens(&self, tokens: &mut TokenStream) { + let fmt = &self.fmt; + let args = &self.args; + + // Currently `write!(f, "text")` produces less efficient code than + // `f.write_str("text")`. 
We recognize the case when the format string + // has no braces and no interpolated values, and generate simpler code. + tokens.extend(if self.requires_fmt_machinery { + quote! { + ::core::write!(__formatter, #fmt #args) + } + } else { + quote! { + __formatter.write_str(#fmt) + } + }); + } +} + +impl ToTokens for Trait { + fn to_tokens(&self, tokens: &mut TokenStream) { + let trait_name = match self { + Trait::Debug => "Debug", + Trait::Display => "Display", + Trait::Octal => "Octal", + Trait::LowerHex => "LowerHex", + Trait::UpperHex => "UpperHex", + Trait::Pointer => "Pointer", + Trait::Binary => "Binary", + Trait::LowerExp => "LowerExp", + Trait::UpperExp => "UpperExp", + }; + let ident = Ident::new(trait_name, Span::call_site()); + tokens.extend(quote!(::core::fmt::#ident)); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/expand.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/expand.rs new file mode 100644 index 000000000000..403cd07d8c44 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/expand.rs @@ -0,0 +1,570 @@ +use crate::ast::{Enum, Field, Input, Struct}; +use crate::attr::Trait; +use crate::generics::InferredBounds; +use crate::span::MemberSpan; +use proc_macro2::TokenStream; +use quote::{format_ident, quote, quote_spanned, ToTokens}; +use std::collections::BTreeSet as Set; +use syn::{DeriveInput, GenericArgument, Member, PathArguments, Result, Token, Type}; + +pub fn derive(input: &DeriveInput) -> TokenStream { + match try_expand(input) { + Ok(expanded) => expanded, + // If there are invalid attributes in the input, expand to an Error impl + // anyway to minimize spurious knock-on errors in other code that uses + // this type as an Error. + Err(error) => fallback(input, error), + } +} + +fn try_expand(input: &DeriveInput) -> Result { + let input = Input::from_syn(input)?; + input.validate()?; + Ok(match input { + Input::Struct(input) => impl_struct(input), + Input::Enum(input) => impl_enum(input), + }) +} + +fn fallback(input: &DeriveInput, error: syn::Error) -> TokenStream { + let ty = &input.ident; + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + + let error = error.to_compile_error(); + + quote! { + #error + + #[allow(unused_qualifications)] + #[automatically_derived] + impl #impl_generics std::error::Error for #ty #ty_generics #where_clause + where + // Work around trivial bounds being unstable. + // https://github.com/rust-lang/rust/issues/48214 + for<'workaround> #ty #ty_generics: ::core::fmt::Debug, + {} + + #[allow(unused_qualifications)] + #[automatically_derived] + impl #impl_generics ::core::fmt::Display for #ty #ty_generics #where_clause { + fn fmt(&self, __formatter: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + ::core::unreachable!() + } + } + } +} + +fn impl_struct(input: Struct) -> TokenStream { + let ty = &input.ident; + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let mut error_inferred_bounds = InferredBounds::new(); + + let source_body = if let Some(transparent_attr) = &input.attrs.transparent { + let only_field = &input.fields[0]; + if only_field.contains_generic { + error_inferred_bounds.insert(only_field.ty, quote!(std::error::Error)); + } + let member = &only_field.member; + Some(quote_spanned! 
{transparent_attr.span=> + std::error::Error::source(self.#member.as_dyn_error()) + }) + } else if let Some(source_field) = input.source_field() { + let source = &source_field.member; + if source_field.contains_generic { + let ty = unoptional_type(source_field.ty); + error_inferred_bounds.insert(ty, quote!(std::error::Error + 'static)); + } + let asref = if type_is_option(source_field.ty) { + Some(quote_spanned!(source.member_span()=> .as_ref()?)) + } else { + None + }; + let dyn_error = quote_spanned! {source_field.source_span()=> + self.#source #asref.as_dyn_error() + }; + Some(quote! { + ::core::option::Option::Some(#dyn_error) + }) + } else { + None + }; + let source_method = source_body.map(|body| { + quote! { + fn source(&self) -> ::core::option::Option<&(dyn std::error::Error + 'static)> { + use thiserror::__private::AsDynError as _; + #body + } + } + }); + + let provide_method = input.backtrace_field().map(|backtrace_field| { + let request = quote!(request); + let backtrace = &backtrace_field.member; + let body = if let Some(source_field) = input.source_field() { + let source = &source_field.member; + let source_provide = if type_is_option(source_field.ty) { + quote_spanned! {source.member_span()=> + if let ::core::option::Option::Some(source) = &self.#source { + source.thiserror_provide(#request); + } + } + } else { + quote_spanned! {source.member_span()=> + self.#source.thiserror_provide(#request); + } + }; + let self_provide = if source == backtrace { + None + } else if type_is_option(backtrace_field.ty) { + Some(quote! { + if let ::core::option::Option::Some(backtrace) = &self.#backtrace { + #request.provide_ref::(backtrace); + } + }) + } else { + Some(quote! { + #request.provide_ref::(&self.#backtrace); + }) + }; + quote! { + use thiserror::__private::ThiserrorProvide as _; + #source_provide + #self_provide + } + } else if type_is_option(backtrace_field.ty) { + quote! { + if let ::core::option::Option::Some(backtrace) = &self.#backtrace { + #request.provide_ref::(backtrace); + } + } + } else { + quote! { + #request.provide_ref::(&self.#backtrace); + } + }; + quote! { + fn provide<'_request>(&'_request self, #request: &mut std::error::Request<'_request>) { + #body + } + } + }); + + let mut display_implied_bounds = Set::new(); + let display_body = if input.attrs.transparent.is_some() { + let only_field = &input.fields[0].member; + display_implied_bounds.insert((0, Trait::Display)); + Some(quote! { + ::core::fmt::Display::fmt(&self.#only_field, __formatter) + }) + } else if let Some(display) = &input.attrs.display { + display_implied_bounds.clone_from(&display.implied_bounds); + let use_as_display = use_as_display(display.has_bonus_display); + let pat = fields_pat(&input.fields); + Some(quote! { + #use_as_display + #[allow(unused_variables, deprecated)] + let Self #pat = self; + #display + }) + } else { + None + }; + let display_impl = display_body.map(|body| { + let mut display_inferred_bounds = InferredBounds::new(); + for (field, bound) in display_implied_bounds { + let field = &input.fields[field]; + if field.contains_generic { + display_inferred_bounds.insert(field.ty, bound); + } + } + let display_where_clause = display_inferred_bounds.augment_where_clause(input.generics); + quote! 
{ + #[allow(unused_qualifications)] + #[automatically_derived] + impl #impl_generics ::core::fmt::Display for #ty #ty_generics #display_where_clause { + #[allow(clippy::used_underscore_binding)] + fn fmt(&self, __formatter: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + #body + } + } + } + }); + + let from_impl = input.from_field().map(|from_field| { + let backtrace_field = input.distinct_backtrace_field(); + let from = unoptional_type(from_field.ty); + let body = from_initializer(from_field, backtrace_field); + quote! { + #[allow(unused_qualifications)] + #[automatically_derived] + impl #impl_generics ::core::convert::From<#from> for #ty #ty_generics #where_clause { + #[allow(deprecated)] + fn from(source: #from) -> Self { + #ty #body + } + } + } + }); + + if input.generics.type_params().next().is_some() { + let self_token = ::default(); + error_inferred_bounds.insert(self_token, Trait::Debug); + error_inferred_bounds.insert(self_token, Trait::Display); + } + let error_where_clause = error_inferred_bounds.augment_where_clause(input.generics); + + quote! { + #[allow(unused_qualifications)] + #[automatically_derived] + impl #impl_generics std::error::Error for #ty #ty_generics #error_where_clause { + #source_method + #provide_method + } + #display_impl + #from_impl + } +} + +fn impl_enum(input: Enum) -> TokenStream { + let ty = &input.ident; + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let mut error_inferred_bounds = InferredBounds::new(); + + let source_method = if input.has_source() { + let arms = input.variants.iter().map(|variant| { + let ident = &variant.ident; + if let Some(transparent_attr) = &variant.attrs.transparent { + let only_field = &variant.fields[0]; + if only_field.contains_generic { + error_inferred_bounds.insert(only_field.ty, quote!(std::error::Error)); + } + let member = &only_field.member; + let source = quote_spanned! {transparent_attr.span=> + std::error::Error::source(transparent.as_dyn_error()) + }; + quote! { + #ty::#ident {#member: transparent} => #source, + } + } else if let Some(source_field) = variant.source_field() { + let source = &source_field.member; + if source_field.contains_generic { + let ty = unoptional_type(source_field.ty); + error_inferred_bounds.insert(ty, quote!(std::error::Error + 'static)); + } + let asref = if type_is_option(source_field.ty) { + Some(quote_spanned!(source.member_span()=> .as_ref()?)) + } else { + None + }; + let varsource = quote!(source); + let dyn_error = quote_spanned! {source_field.source_span()=> + #varsource #asref.as_dyn_error() + }; + quote! { + #ty::#ident {#source: #varsource, ..} => ::core::option::Option::Some(#dyn_error), + } + } else { + quote! { + #ty::#ident {..} => ::core::option::Option::None, + } + } + }); + Some(quote! { + fn source(&self) -> ::core::option::Option<&(dyn std::error::Error + 'static)> { + use thiserror::__private::AsDynError as _; + #[allow(deprecated)] + match self { + #(#arms)* + } + } + }) + } else { + None + }; + + let provide_method = if input.has_backtrace() { + let request = quote!(request); + let arms = input.variants.iter().map(|variant| { + let ident = &variant.ident; + match (variant.backtrace_field(), variant.source_field()) { + (Some(backtrace_field), Some(source_field)) + if backtrace_field.attrs.backtrace.is_none() => + { + let backtrace = &backtrace_field.member; + let source = &source_field.member; + let varsource = quote!(source); + let source_provide = if type_is_option(source_field.ty) { + quote_spanned! 
{source.member_span()=> + if let ::core::option::Option::Some(source) = #varsource { + source.thiserror_provide(#request); + } + } + } else { + quote_spanned! {source.member_span()=> + #varsource.thiserror_provide(#request); + } + }; + let self_provide = if type_is_option(backtrace_field.ty) { + quote! { + if let ::core::option::Option::Some(backtrace) = backtrace { + #request.provide_ref::(backtrace); + } + } + } else { + quote! { + #request.provide_ref::(backtrace); + } + }; + quote! { + #ty::#ident { + #backtrace: backtrace, + #source: #varsource, + .. + } => { + use thiserror::__private::ThiserrorProvide as _; + #source_provide + #self_provide + } + } + } + (Some(backtrace_field), Some(source_field)) + if backtrace_field.member == source_field.member => + { + let backtrace = &backtrace_field.member; + let varsource = quote!(source); + let source_provide = if type_is_option(source_field.ty) { + quote_spanned! {backtrace.member_span()=> + if let ::core::option::Option::Some(source) = #varsource { + source.thiserror_provide(#request); + } + } + } else { + quote_spanned! {backtrace.member_span()=> + #varsource.thiserror_provide(#request); + } + }; + quote! { + #ty::#ident {#backtrace: #varsource, ..} => { + use thiserror::__private::ThiserrorProvide as _; + #source_provide + } + } + } + (Some(backtrace_field), _) => { + let backtrace = &backtrace_field.member; + let body = if type_is_option(backtrace_field.ty) { + quote! { + if let ::core::option::Option::Some(backtrace) = backtrace { + #request.provide_ref::(backtrace); + } + } + } else { + quote! { + #request.provide_ref::(backtrace); + } + }; + quote! { + #ty::#ident {#backtrace: backtrace, ..} => { + #body + } + } + } + (None, _) => quote! { + #ty::#ident {..} => {} + }, + } + }); + Some(quote! { + fn provide<'_request>(&'_request self, #request: &mut std::error::Request<'_request>) { + #[allow(deprecated)] + match self { + #(#arms)* + } + } + }) + } else { + None + }; + + let display_impl = if input.has_display() { + let mut display_inferred_bounds = InferredBounds::new(); + let has_bonus_display = input.variants.iter().any(|v| { + v.attrs + .display + .as_ref() + .map_or(false, |display| display.has_bonus_display) + }); + let use_as_display = use_as_display(has_bonus_display); + let void_deref = if input.variants.is_empty() { + Some(quote!(*)) + } else { + None + }; + let arms = input.variants.iter().map(|variant| { + let mut display_implied_bounds = Set::new(); + let display = match &variant.attrs.display { + Some(display) => { + display_implied_bounds.clone_from(&display.implied_bounds); + display.to_token_stream() + } + None => { + let only_field = match &variant.fields[0].member { + Member::Named(ident) => ident.clone(), + Member::Unnamed(index) => format_ident!("_{}", index), + }; + display_implied_bounds.insert((0, Trait::Display)); + quote!(::core::fmt::Display::fmt(#only_field, __formatter)) + } + }; + for (field, bound) in display_implied_bounds { + let field = &variant.fields[field]; + if field.contains_generic { + display_inferred_bounds.insert(field.ty, bound); + } + } + let ident = &variant.ident; + let pat = fields_pat(&variant.fields); + quote! { + #ty::#ident #pat => #display + } + }); + let arms = arms.collect::>(); + let display_where_clause = display_inferred_bounds.augment_where_clause(input.generics); + Some(quote! 
{ + #[allow(unused_qualifications)] + #[automatically_derived] + impl #impl_generics ::core::fmt::Display for #ty #ty_generics #display_where_clause { + fn fmt(&self, __formatter: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { + #use_as_display + #[allow(unused_variables, deprecated, clippy::used_underscore_binding)] + match #void_deref self { + #(#arms,)* + } + } + } + }) + } else { + None + }; + + let from_impls = input.variants.iter().filter_map(|variant| { + let from_field = variant.from_field()?; + let backtrace_field = variant.distinct_backtrace_field(); + let variant = &variant.ident; + let from = unoptional_type(from_field.ty); + let body = from_initializer(from_field, backtrace_field); + Some(quote! { + #[allow(unused_qualifications)] + #[automatically_derived] + impl #impl_generics ::core::convert::From<#from> for #ty #ty_generics #where_clause { + #[allow(deprecated)] + fn from(source: #from) -> Self { + #ty::#variant #body + } + } + }) + }); + + if input.generics.type_params().next().is_some() { + let self_token = ::default(); + error_inferred_bounds.insert(self_token, Trait::Debug); + error_inferred_bounds.insert(self_token, Trait::Display); + } + let error_where_clause = error_inferred_bounds.augment_where_clause(input.generics); + + quote! { + #[allow(unused_qualifications)] + #[automatically_derived] + impl #impl_generics std::error::Error for #ty #ty_generics #error_where_clause { + #source_method + #provide_method + } + #display_impl + #(#from_impls)* + } +} + +fn fields_pat(fields: &[Field]) -> TokenStream { + let mut members = fields.iter().map(|field| &field.member).peekable(); + match members.peek() { + Some(Member::Named(_)) => quote!({ #(#members),* }), + Some(Member::Unnamed(_)) => { + let vars = members.map(|member| match member { + Member::Unnamed(member) => format_ident!("_{}", member), + Member::Named(_) => unreachable!(), + }); + quote!((#(#vars),*)) + } + None => quote!({}), + } +} + +fn use_as_display(needs_as_display: bool) -> Option { + if needs_as_display { + Some(quote! { + use thiserror::__private::AsDisplay as _; + }) + } else { + None + } +} + +fn from_initializer(from_field: &Field, backtrace_field: Option<&Field>) -> TokenStream { + let from_member = &from_field.member; + let some_source = if type_is_option(from_field.ty) { + quote!(::core::option::Option::Some(source)) + } else { + quote!(source) + }; + let backtrace = backtrace_field.map(|backtrace_field| { + let backtrace_member = &backtrace_field.member; + if type_is_option(backtrace_field.ty) { + quote! { + #backtrace_member: ::core::option::Option::Some(std::backtrace::Backtrace::capture()), + } + } else { + quote! 
{ + #backtrace_member: ::core::convert::From::from(std::backtrace::Backtrace::capture()), + } + } + }); + quote!({ + #from_member: #some_source, + #backtrace + }) +} + +fn type_is_option(ty: &Type) -> bool { + type_parameter_of_option(ty).is_some() +} + +fn unoptional_type(ty: &Type) -> TokenStream { + let unoptional = type_parameter_of_option(ty).unwrap_or(ty); + quote!(#unoptional) +} + +fn type_parameter_of_option(ty: &Type) -> Option<&Type> { + let path = match ty { + Type::Path(ty) => &ty.path, + _ => return None, + }; + + let last = path.segments.last().unwrap(); + if last.ident != "Option" { + return None; + } + + let bracketed = match &last.arguments { + PathArguments::AngleBracketed(bracketed) => bracketed, + _ => return None, + }; + + if bracketed.args.len() != 1 { + return None; + } + + match &bracketed.args[0] { + GenericArgument::Type(arg) => Some(arg), + _ => None, + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/fmt.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/fmt.rs similarity index 97% rename from third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/fmt.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/fmt.rs index 807dfb96770b..b38b7bf1f5d6 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/fmt.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/fmt.rs @@ -32,7 +32,10 @@ impl Display<'_> { } } + self.requires_fmt_machinery = self.requires_fmt_machinery || fmt.contains('}'); + while let Some(brace) = read.find('{') { + self.requires_fmt_machinery = true; out += &read[..brace + 1]; read = &read[brace + 1..]; if read.starts_with('{') { diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/generics.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/generics.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/generics.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/generics.rs diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/lib.rs new file mode 100644 index 000000000000..58f4bb5b5dde --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/lib.rs @@ -0,0 +1,36 @@ +#![allow( + clippy::blocks_in_conditions, + clippy::cast_lossless, + clippy::cast_possible_truncation, + clippy::manual_find, + clippy::manual_let_else, + clippy::manual_map, + clippy::map_unwrap_or, + clippy::module_name_repetitions, + clippy::needless_pass_by_value, + clippy::range_plus_one, + clippy::single_match_else, + clippy::struct_field_names, + clippy::too_many_lines, + clippy::wrong_self_convention +)] + +extern crate proc_macro; + +mod ast; +mod attr; +mod expand; +mod fmt; +mod generics; +mod prop; +mod span; +mod valid; + +use proc_macro::TokenStream; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(Error, attributes(backtrace, error, from, source))] +pub fn derive_error(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + expand::derive(&input).into() +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/prop.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/prop.rs similarity index 89% rename from 
third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/prop.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/prop.rs index 6d8a924c1172..2867cd312ab3 100644 --- a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/prop.rs +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/prop.rs @@ -1,4 +1,6 @@ use crate::ast::{Enum, Field, Struct, Variant}; +use crate::span::MemberSpan; +use proc_macro2::Span; use syn::{Member, Type}; impl Struct<'_> { @@ -70,6 +72,16 @@ impl Field<'_> { pub(crate) fn is_backtrace(&self) -> bool { type_is_backtrace(self.ty) } + + pub(crate) fn source_span(&self) -> Span { + if let Some(source_attr) = &self.attrs.source { + source_attr.path().get_ident().unwrap().span() + } else if let Some(from_attr) = &self.attrs.from { + from_attr.path().get_ident().unwrap().span() + } else { + self.member.member_span() + } + } } fn from_field<'a, 'b>(fields: &'a [Field<'b>]) -> Option<&'a Field<'b>> { diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/span.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/span.rs new file mode 100644 index 000000000000..c1237ddfc52d --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/span.rs @@ -0,0 +1,15 @@ +use proc_macro2::Span; +use syn::Member; + +pub trait MemberSpan { + fn member_span(&self) -> Span; +} + +impl MemberSpan for Member { + fn member_span(&self) -> Span { + match self { + Member::Named(ident) => ident.span(), + Member::Unnamed(index) => index.span, + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/valid.rs b/third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/valid.rs similarity index 100% rename from third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.38/src/valid.rs rename to third_party/rust/chromium_crates_io/vendor/thiserror-impl-1.0.65/src/valid.rs diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.cargo_vcs_info.json b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.cargo_vcs_info.json deleted file mode 100644 index 69ea9b4ba0cc..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "70caf4085324bff452d8c20ead55974513030696" - }, - "path_in_vcs": "" -} \ No newline at end of file diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.github/dependabot.yml b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.github/dependabot.yml deleted file mode 100644 index 7377d37597f7..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.github/dependabot.yml +++ /dev/null @@ -1,8 +0,0 @@ -version: 2 -updates: -- package-ecosystem: cargo - directory: "/" - schedule: - interval: daily - time: "08:00" - open-pull-requests-limit: 10 diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.github/workflows/main.yml b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.github/workflows/main.yml deleted file mode 100644 index c6fa6e992d4e..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/.github/workflows/main.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: CI -on: [push, pull_request] - -jobs: - test: - name: Test - runs-on: ubuntu-latest - strategy: - matrix: - rust: [stable, beta, nightly] - steps: - - uses: actions/checkout@master - - name: Install Rust (rustup) - run: rustup update ${{ matrix.rust }} && 
rustup default ${{ matrix.rust }} - - run: cargo test - - run: cargo test --features preserve_order - - run: cargo test --manifest-path test-suite/Cargo.toml - - run: cargo bench - - rustfmt: - name: Rustfmt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - name: Install Rust - run: rustup update stable && rustup default stable && rustup component add rustfmt - - run: cargo fmt -- --check - - publish_docs: - name: Publish Documentation - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - name: Install Rust - run: rustup update stable && rustup default stable - - name: Build documentation - run: cargo doc --no-deps --all-features - - name: Publish documentation - run: | - cd target/doc - git init - git add . - git -c user.name='ci' -c user.email='ci' commit -m init - git push -f -q https://git:${{ secrets.github_token }}@github.com/${{ github.repository }} HEAD:gh-pages - if: github.event_name == 'push' && github.event.ref == 'refs/heads/master' diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/Cargo.toml b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/Cargo.toml deleted file mode 100644 index 2e30a900a8fa..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "toml" -version = "0.5.9" -authors = ["Alex Crichton "] -description = """ -A native Rust encoder and decoder of TOML-formatted files and streams. Provides -implementations of the standard Serialize/Deserialize traits for TOML data to -facilitate deserializing and serializing Rust structures. -""" -homepage = "https://github.com/alexcrichton/toml-rs" -documentation = "https://docs.rs/toml" -readme = "README.md" -keywords = ["encoding"] -categories = [ - "config", - "encoding", - "parser-implementations", -] -license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/toml-rs" - -[dependencies.indexmap] -version = "1.0" -optional = true - -[dependencies.serde] -version = "1.0.97" - -[dev-dependencies.serde_derive] -version = "1.0" - -[dev-dependencies.serde_json] -version = "1.0" - -[features] -default = [] -preserve_order = ["indexmap"] diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/Cargo.toml.orig b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/Cargo.toml.orig deleted file mode 100644 index f87580283f95..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/Cargo.toml.orig +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "toml" -version = "0.5.9" -authors = ["Alex Crichton "] -license = "MIT/Apache-2.0" -readme = "README.md" -keywords = ["encoding"] -repository = "https://github.com/alexcrichton/toml-rs" -homepage = "https://github.com/alexcrichton/toml-rs" -documentation = "https://docs.rs/toml" -description = """ -A native Rust encoder and decoder of TOML-formatted files and streams. Provides -implementations of the standard Serialize/Deserialize traits for TOML data to -facilitate deserializing and serializing Rust structures. 
-""" -categories = ["config", "encoding", "parser-implementations"] -edition = "2018" - -[workspace] -members = ['test-suite'] - -[dependencies] -serde = "1.0.97" -indexmap = { version = "1.0", optional = true } - -[dev-dependencies] -serde_derive = "1.0" -serde_json = "1.0" - -[features] -default = [] - -# Use indexmap rather than BTreeMap as the map type of toml::Value. -# This allows data to be read into a Value and written back to a TOML string -# while preserving the order of map keys in the input. -preserve_order = ["indexmap"] diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/LICENSE-MIT b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/LICENSE-MIT deleted file mode 100644 index 39e0ed660215..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/README.md b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/README.md deleted file mode 100644 index 21dd1088a20b..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# toml-rs - -[![Latest Version](https://img.shields.io/crates/v/toml.svg)](https://crates.io/crates/toml) -[![Documentation](https://docs.rs/toml/badge.svg)](https://docs.rs/toml) - -A [TOML][toml] decoder and encoder for Rust. This library is currently compliant -with the v0.5.0 version of TOML. This library will also likely continue to stay -up to date with the TOML specification as changes happen. - -[toml]: https://github.com/toml-lang/toml - -```toml -# Cargo.toml -[dependencies] -toml = "0.5" -``` - -This crate also supports serialization/deserialization through the -[serde](https://serde.rs) crate on crates.io. Currently the older `rustc-serialize` -crate is not supported in the 0.3+ series of the `toml` crate, but 0.2 can be -used for that support. - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in toml-rs by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. 
diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/decode.rs b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/decode.rs deleted file mode 100644 index 256069b350c5..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/decode.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! An example showing off the usage of `Deserialize` to automatically decode -//! TOML into a Rust `struct` - -#![deny(warnings)] -#![allow(dead_code)] - -use serde_derive::Deserialize; - -/// This is what we're going to decode into. Each field is optional, meaning -/// that it doesn't have to be present in TOML. -#[derive(Debug, Deserialize)] -struct Config { - global_string: Option, - global_integer: Option, - server: Option, - peers: Option>, -} - -/// Sub-structs are decoded from tables, so this will decode from the `[server]` -/// table. -/// -/// Again, each field is optional, meaning they don't have to be present. -#[derive(Debug, Deserialize)] -struct ServerConfig { - ip: Option, - port: Option, -} - -#[derive(Debug, Deserialize)] -struct PeerConfig { - ip: Option, - port: Option, -} - -fn main() { - let toml_str = r#" - global_string = "test" - global_integer = 5 - - [server] - ip = "127.0.0.1" - port = 80 - - [[peers]] - ip = "127.0.0.1" - port = 8080 - - [[peers]] - ip = "127.0.0.1" - "#; - - let decoded: Config = toml::from_str(toml_str).unwrap(); - println!("{:#?}", decoded); -} diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/enum_external.rs b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/enum_external.rs deleted file mode 100644 index 7de061f61476..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/enum_external.rs +++ /dev/null @@ -1,45 +0,0 @@ -//! An example showing off the usage of `Deserialize` to automatically decode -//! TOML into a Rust `struct`, with enums. - -#![deny(warnings)] -#![allow(dead_code)] - -use serde_derive::Deserialize; - -/// This is what we're going to decode into. 
-#[derive(Debug, Deserialize)] -struct Config { - plain: MyEnum, - plain_table: MyEnum, - tuple: MyEnum, - #[serde(rename = "struct")] - structv: MyEnum, - newtype: MyEnum, - my_enum: Vec, -} - -#[derive(Debug, Deserialize)] -enum MyEnum { - Plain, - Tuple(i64, bool), - NewType(String), - Struct { value: i64 }, -} - -fn main() { - let toml_str = r#" - plain = "Plain" - plain_table = { Plain = {} } - tuple = { Tuple = { 0 = 123, 1 = true } } - struct = { Struct = { value = 123 } } - newtype = { NewType = "value" } - my_enum = [ - { Plain = {} }, - { Tuple = { 0 = 123, 1 = true } }, - { NewType = "value" }, - { Struct = { value = 123 } } - ]"#; - - let decoded: Config = toml::from_str(toml_str).unwrap(); - println!("{:#?}", decoded); -} diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/toml2json.rs b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/toml2json.rs deleted file mode 100644 index 1b90c9fde59a..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/examples/toml2json.rs +++ /dev/null @@ -1,47 +0,0 @@ -#![deny(warnings)] - -use std::env; -use std::fs::File; -use std::io; -use std::io::prelude::*; - -use serde_json::Value as Json; -use toml::Value as Toml; - -fn main() { - let mut args = env::args(); - let mut input = String::new(); - if args.len() > 1 { - let name = args.nth(1).unwrap(); - File::open(&name) - .and_then(|mut f| f.read_to_string(&mut input)) - .unwrap(); - } else { - io::stdin().read_to_string(&mut input).unwrap(); - } - - match input.parse() { - Ok(toml) => { - let json = convert(toml); - println!("{}", serde_json::to_string_pretty(&json).unwrap()); - } - Err(error) => println!("failed to parse TOML: {}", error), - } -} - -fn convert(toml: Toml) -> Json { - match toml { - Toml::String(s) => Json::String(s), - Toml::Integer(i) => Json::Number(i.into()), - Toml::Float(f) => { - let n = serde_json::Number::from_f64(f).expect("float infinite and nan not allowed"); - Json::Number(n) - } - Toml::Boolean(b) => Json::Bool(b), - Toml::Array(arr) => Json::Array(arr.into_iter().map(convert).collect()), - Toml::Table(table) => { - Json::Object(table.into_iter().map(|(k, v)| (k, convert(v))).collect()) - } - Toml::Datetime(dt) => Json::String(dt.to_string()), - } -} diff --git a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/src/datetime.rs b/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/src/datetime.rs deleted file mode 100644 index a68b0756836d..000000000000 --- a/third_party/rust/chromium_crates_io/vendor/toml-0.5.9/src/datetime.rs +++ /dev/null @@ -1,544 +0,0 @@ -use std::error; -use std::fmt; -use std::str::{self, FromStr}; - -use serde::{de, ser}; - -/// A parsed TOML datetime value -/// -/// This structure is intended to represent the datetime primitive type that can -/// be encoded into TOML documents. This type is a parsed version that contains -/// all metadata internally. -/// -/// Currently this type is intentionally conservative and only supports -/// `to_string` as an accessor. Over time though it's intended that it'll grow -/// more support! -/// -/// Note that if you're using `Deserialize` to deserialize a TOML document, you -/// can use this as a placeholder for where you're expecting a datetime to be -/// specified. -/// -/// Also note though that while this type implements `Serialize` and -/// `Deserialize` it's only recommended to use this type with the TOML format, -/// otherwise encoded in other formats it may look a little odd. 
-/// -/// Depending on how the option values are used, this struct will correspond -/// with one of the following four datetimes from the [TOML v1.0.0 spec]: -/// -/// | `date` | `time` | `offset` | TOML type | -/// | --------- | --------- | --------- | ------------------ | -/// | `Some(_)` | `Some(_)` | `Some(_)` | [Offset Date-Time] | -/// | `Some(_)` | `Some(_)` | `None` | [Local Date-Time] | -/// | `Some(_)` | `None` | `None` | [Local Date] | -/// | `None` | `Some(_)` | `None` | [Local Time] | -/// -/// **1. Offset Date-Time**: If all the optional values are used, `Datetime` -/// corresponds to an [Offset Date-Time]. From the TOML v1.0.0 spec: -/// -/// > To unambiguously represent a specific instant in time, you may use an -/// > RFC 3339 formatted date-time with offset. -/// > -/// > ```toml -/// > odt1 = 1979-05-27T07:32:00Z -/// > odt2 = 1979-05-27T00:32:00-07:00 -/// > odt3 = 1979-05-27T00:32:00.999999-07:00 -/// > ``` -/// > -/// > For the sake of readability, you may replace the T delimiter between date -/// > and time with a space character (as permitted by RFC 3339 section 5.6). -/// > -/// > ```toml -/// > odt4 = 1979-05-27 07:32:00Z -/// > ``` -/// -/// **2. Local Date-Time**: If `date` and `time` are given but `offset` is -/// `None`, `Datetime` corresponds to a [Local Date-Time]. From the spec: -/// -/// > If you omit the offset from an RFC 3339 formatted date-time, it will -/// > represent the given date-time without any relation to an offset or -/// > timezone. It cannot be converted to an instant in time without additional -/// > information. Conversion to an instant, if required, is implementation- -/// > specific. -/// > -/// > ```toml -/// > ldt1 = 1979-05-27T07:32:00 -/// > ldt2 = 1979-05-27T00:32:00.999999 -/// > ``` -/// -/// **3. Local Date**: If only `date` is given, `Datetime` corresponds to a -/// [Local Date]; see the docs for [`Date`]. -/// -/// **4. Local Time**: If only `time` is given, `Datetime` corresponds to a -/// [Local Time]; see the docs for [`Time`]. -/// -/// [TOML v1.0.0 spec]: https://toml.io/en/v1.0.0 -/// [Offset Date-Time]: https://toml.io/en/v1.0.0#offset-date-time -/// [Local Date-Time]: https://toml.io/en/v1.0.0#local-date-time -/// [Local Date]: https://toml.io/en/v1.0.0#local-date -/// [Local Time]: https://toml.io/en/v1.0.0#local-time -#[derive(PartialEq, Clone)] -pub struct Datetime { - /// Optional date. - /// Required for: *Offset Date-Time*, *Local Date-Time*, *Local Date*. - pub date: Option, - - /// Optional time. - /// Required for: *Offset Date-Time*, *Local Date-Time*, *Local Time*. - pub time: Option
for ArrayOfTables { + fn extend<T: IntoIterator<Item = Table>>(&mut self, iter: T) { + for value in iter { + self.push(value); + } + } +} + +impl FromIterator<Table>
for ArrayOfTables { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let v = iter.into_iter().map(Item::Table); + ArrayOfTables { + values: v.collect(), + span: None, + } + } +} + +impl IntoIterator for ArrayOfTables { + type Item = Table; + type IntoIter = ArrayOfTablesIntoIter; + + fn into_iter(self) -> Self::IntoIter { + Box::new( + self.values + .into_iter() + .filter(|v| v.is_table()) + .map(|v| v.into_table().unwrap()), + ) + } +} + +impl<'s> IntoIterator for &'s ArrayOfTables { + type Item = &'s Table; + type IntoIter = ArrayOfTablesIter<'s>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +#[cfg(feature = "display")] +impl std::fmt::Display for ArrayOfTables { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // HACK: Without the header, we don't really have a proper way of printing this + self.clone().into_array().fmt(f) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/array.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/array.rs new file mode 100644 index 000000000000..eeedd7ba2721 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/array.rs @@ -0,0 +1,97 @@ +use crate::de::Error; + +pub(crate) struct ArrayDeserializer { + input: Vec, + span: Option>, +} + +impl ArrayDeserializer { + pub(crate) fn new(input: Vec, span: Option>) -> Self { + Self { input, span } + } +} + +// Note: this is wrapped by `ValueDeserializer` and any trait methods +// implemented here need to be wrapped there +impl<'de> serde::Deserializer<'de> for ArrayDeserializer { + type Error = Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + visitor.visit_seq(ArraySeqAccess::new(self.input)) + } + + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + if serde_spanned::__unstable::is_spanned(name, fields) { + if let Some(span) = self.span.clone() { + return visitor.visit_map(super::SpannedDeserializer::new(self, span)); + } + } + + self.deserialize_any(visitor) + } + + serde::forward_to_deserialize_any! 
{ + bool u8 u16 u32 u64 i8 i16 i32 i64 f32 f64 char str string seq + bytes byte_buf map option unit newtype_struct + ignored_any unit_struct tuple_struct tuple enum identifier + } +} + +impl<'de> serde::de::IntoDeserializer<'de, Error> for ArrayDeserializer { + type Deserializer = Self; + + fn into_deserializer(self) -> Self::Deserializer { + self + } +} + +impl crate::Array { + pub(crate) fn into_deserializer(self) -> ArrayDeserializer { + ArrayDeserializer::new(self.values, self.span) + } +} + +impl crate::ArrayOfTables { + pub(crate) fn into_deserializer(self) -> ArrayDeserializer { + ArrayDeserializer::new(self.values, self.span) + } +} + +pub(crate) struct ArraySeqAccess { + iter: std::vec::IntoIter, +} + +impl ArraySeqAccess { + pub(crate) fn new(input: Vec) -> Self { + Self { + iter: input.into_iter(), + } + } +} + +impl<'de> serde::de::SeqAccess<'de> for ArraySeqAccess { + type Error = Error; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: serde::de::DeserializeSeed<'de>, + { + match self.iter.next() { + Some(v) => seed + .deserialize(crate::de::ValueDeserializer::new(v)) + .map(Some), + None => Ok(None), + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/datetime.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/datetime.rs new file mode 100644 index 000000000000..14de28b3f13c --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/datetime.rs @@ -0,0 +1,43 @@ +use serde::de::value::BorrowedStrDeserializer; +use serde::de::IntoDeserializer; + +use crate::de::Error; + +pub(crate) struct DatetimeDeserializer { + date: Option, +} + +impl DatetimeDeserializer { + pub(crate) fn new(date: crate::Datetime) -> Self { + Self { date: Some(date) } + } +} + +impl<'de> serde::de::MapAccess<'de> for DatetimeDeserializer { + type Error = Error; + + fn next_key_seed(&mut self, seed: K) -> Result, Error> + where + K: serde::de::DeserializeSeed<'de>, + { + if self.date.is_some() { + seed.deserialize(BorrowedStrDeserializer::new( + toml_datetime::__unstable::FIELD, + )) + .map(Some) + } else { + Ok(None) + } + } + + fn next_value_seed(&mut self, seed: V) -> Result + where + V: serde::de::DeserializeSeed<'de>, + { + if let Some(date) = self.date.take() { + seed.deserialize(date.to_string().into_deserializer()) + } else { + panic!("next_value_seed called before next_key_seed") + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/key.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/key.rs new file mode 100644 index 000000000000..7536078f6d92 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/key.rs @@ -0,0 +1,151 @@ +use serde::de::IntoDeserializer; + +use super::Error; + +pub(crate) struct KeyDeserializer { + span: Option>, + key: crate::Key, +} + +impl KeyDeserializer { + pub(crate) fn new(key: crate::Key, span: Option>) -> Self { + KeyDeserializer { span, key } + } +} + +impl<'de> IntoDeserializer<'de, Error> for KeyDeserializer { + type Deserializer = Self; + + fn into_deserializer(self) -> Self::Deserializer { + self + } +} + +impl<'de> serde::de::Deserializer<'de> for KeyDeserializer { + type Error = Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + self.key.into_deserializer().deserialize_any(visitor) + } + + fn deserialize_enum( + self, + name: &str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + 
where + V: serde::de::Visitor<'de>, + { + let _ = name; + let _ = variants; + visitor.visit_enum(self) + } + + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + if serde_spanned::__unstable::is_spanned(name, fields) { + if let Some(span) = self.span.clone() { + return visitor.visit_map(super::SpannedDeserializer::new(self.key.get(), span)); + } + } + self.deserialize_any(visitor) + } + + fn deserialize_newtype_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + visitor.visit_newtype_struct(self) + } + + serde::forward_to_deserialize_any! { + bool u8 u16 u32 u64 i8 i16 i32 i64 f32 f64 char str string seq + bytes byte_buf map option unit + ignored_any unit_struct tuple_struct tuple identifier + } +} + +impl<'de> serde::de::EnumAccess<'de> for KeyDeserializer { + type Error = Error; + type Variant = UnitOnly; + + fn variant_seed(self, seed: T) -> Result<(T::Value, Self::Variant), Self::Error> + where + T: serde::de::DeserializeSeed<'de>, + { + seed.deserialize(self).map(unit_only) + } +} + +pub(crate) struct UnitOnly { + marker: std::marker::PhantomData, +} + +fn unit_only(t: T) -> (T, UnitOnly) { + ( + t, + UnitOnly { + marker: std::marker::PhantomData, + }, + ) +} + +impl<'de, E> serde::de::VariantAccess<'de> for UnitOnly +where + E: serde::de::Error, +{ + type Error = E; + + fn unit_variant(self) -> Result<(), Self::Error> { + Ok(()) + } + + fn newtype_variant_seed(self, _seed: T) -> Result + where + T: serde::de::DeserializeSeed<'de>, + { + Err(serde::de::Error::invalid_type( + serde::de::Unexpected::UnitVariant, + &"newtype variant", + )) + } + + fn tuple_variant(self, _len: usize, _visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + Err(serde::de::Error::invalid_type( + serde::de::Unexpected::UnitVariant, + &"tuple variant", + )) + } + + fn struct_variant( + self, + _fields: &'static [&'static str], + _visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + Err(serde::de::Error::invalid_type( + serde::de::Unexpected::UnitVariant, + &"struct variant", + )) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/mod.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/mod.rs new file mode 100644 index 000000000000..fe1d285a5bee --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/mod.rs @@ -0,0 +1,321 @@ +//! Deserializing TOML into Rust structures. +//! +//! This module contains all the Serde support for deserializing TOML documents into Rust structures. + +use serde::de::DeserializeOwned; + +mod array; +mod datetime; +mod key; +mod spanned; +mod table; +mod table_enum; +mod value; + +use array::ArrayDeserializer; +use datetime::DatetimeDeserializer; +use key::KeyDeserializer; +use spanned::SpannedDeserializer; +use table_enum::TableEnumDeserializer; + +pub use value::ValueDeserializer; + +/// Errors that can occur when deserializing a type. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Error { + inner: crate::TomlError, +} + +impl Error { + pub(crate) fn custom(msg: T, span: Option>) -> Self + where + T: std::fmt::Display, + { + Error { + inner: crate::TomlError::custom(msg.to_string(), span), + } + } + + /// Add key while unwinding + pub fn add_key(&mut self, key: String) { + self.inner.add_key(key); + } + + /// What went wrong + pub fn message(&self) -> &str { + self.inner.message() + } + + /// The start/end index into the original document where the error occurred + pub fn span(&self) -> Option> { + self.inner.span() + } + + pub(crate) fn set_span(&mut self, span: Option>) { + self.inner.set_span(span); + } +} + +impl serde::de::Error for Error { + fn custom(msg: T) -> Self + where + T: std::fmt::Display, + { + Error::custom(msg, None) + } +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.inner.fmt(f) + } +} + +impl From for Error { + fn from(e: crate::TomlError) -> Error { + Self { inner: e } + } +} + +impl From for crate::TomlError { + fn from(e: Error) -> crate::TomlError { + e.inner + } +} + +impl std::error::Error for Error {} + +/// Convert a TOML [documents][crate::DocumentMut] into `T`. +#[cfg(feature = "parse")] +pub fn from_str(s: &'_ str) -> Result +where + T: DeserializeOwned, +{ + let de = Deserializer::parse(s)?; + T::deserialize(de) +} + +/// Convert a TOML [documents][crate::DocumentMut] into `T`. +#[cfg(feature = "parse")] +pub fn from_slice(s: &'_ [u8]) -> Result +where + T: DeserializeOwned, +{ + let s = std::str::from_utf8(s).map_err(|e| Error::custom(e, None))?; + from_str(s) +} + +/// Convert a [`DocumentMut`][crate::DocumentMut] into `T`. +pub fn from_document(d: impl Into) -> Result +where + T: DeserializeOwned, +{ + let deserializer = d.into(); + T::deserialize(deserializer) +} + +/// Deserialization for TOML [documents][crate::DocumentMut]. +pub struct Deserializer { + root: crate::Item, + raw: Option, +} + +impl Deserializer { + /// Deserialization implementation for TOML. + #[deprecated(since = "0.22.6", note = "Replaced with `Deserializer::from`")] + pub fn new(input: crate::DocumentMut) -> Self { + Self::from(input) + } +} + +#[cfg(feature = "parse")] +impl> Deserializer { + /// Parse a TOML document + pub fn parse(raw: S) -> Result { + crate::ImDocument::parse(raw) + .map(Self::from) + .map_err(Into::into) + } +} + +impl From for Deserializer { + fn from(doc: crate::DocumentMut) -> Self { + let crate::DocumentMut { root, .. } = doc; + Self { root, raw: None } + } +} + +impl From> for Deserializer { + fn from(doc: crate::ImDocument) -> Self { + let crate::ImDocument { root, raw, .. 
} = doc; + let raw = Some(raw); + Self { root, raw } + } +} + +#[cfg(feature = "parse")] +impl std::str::FromStr for Deserializer { + type Err = Error; + + /// Parses a document from a &str + fn from_str(s: &str) -> Result { + let doc: crate::ImDocument<_> = s.parse().map_err(Error::from)?; + Ok(Deserializer::from(doc)) + } +} + +// Note: this is wrapped by `toml::de::Deserializer` and any trait methods +// implemented here need to be wrapped there +impl<'de, S: Into> serde::Deserializer<'de> for Deserializer { + type Error = Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + let raw = self.raw; + self.root + .into_deserializer() + .deserialize_any(visitor) + .map_err(|mut e: Self::Error| { + e.inner.set_raw(raw.map(|r| r.into())); + e + }) + } + + // `None` is interpreted as a missing field so be sure to implement `Some` + // as a present field. + fn deserialize_option(self, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + let raw = self.raw; + self.root + .into_deserializer() + .deserialize_option(visitor) + .map_err(|mut e: Self::Error| { + e.inner.set_raw(raw.map(|r| r.into())); + e + }) + } + + fn deserialize_newtype_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + let raw = self.raw; + self.root + .into_deserializer() + .deserialize_newtype_struct(name, visitor) + .map_err(|mut e: Self::Error| { + e.inner.set_raw(raw.map(|r| r.into())); + e + }) + } + + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + let raw = self.raw; + self.root + .into_deserializer() + .deserialize_struct(name, fields, visitor) + .map_err(|mut e: Self::Error| { + e.inner.set_raw(raw.map(|r| r.into())); + e + }) + } + + // Called when the type to deserialize is an enum, as opposed to a field in the type. + fn deserialize_enum( + self, + name: &'static str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + let raw = self.raw; + self.root + .into_deserializer() + .deserialize_enum(name, variants, visitor) + .map_err(|mut e: Self::Error| { + e.inner.set_raw(raw.map(|r| r.into())); + e + }) + } + + serde::forward_to_deserialize_any! 
{ + bool u8 u16 u32 u64 i8 i16 i32 i64 f32 f64 char str string seq + bytes byte_buf map unit + ignored_any unit_struct tuple_struct tuple identifier + } +} + +impl<'de> serde::de::IntoDeserializer<'de, Error> for Deserializer { + type Deserializer = Deserializer; + + fn into_deserializer(self) -> Self::Deserializer { + self + } +} + +impl<'de> serde::de::IntoDeserializer<'de, Error> for crate::DocumentMut { + type Deserializer = Deserializer; + + fn into_deserializer(self) -> Self::Deserializer { + Deserializer::from(self) + } +} + +impl<'de> serde::de::IntoDeserializer<'de, Error> for crate::ImDocument { + type Deserializer = Deserializer; + + fn into_deserializer(self) -> Self::Deserializer { + Deserializer::from(self) + } +} + +pub(crate) fn validate_struct_keys( + table: &crate::table::KeyValuePairs, + fields: &'static [&'static str], +) -> Result<(), Error> { + let extra_fields = table + .keys() + .filter_map(|key| { + if !fields.contains(&key.get()) { + Some(key.clone()) + } else { + None + } + }) + .collect::>(); + + if extra_fields.is_empty() { + Ok(()) + } else { + Err(Error::custom( + format!( + "unexpected keys in table: {}, available keys: {}", + extra_fields + .iter() + .map(|k| k.get()) + .collect::>() + .join(", "), + fields.join(", "), + ), + extra_fields[0].span(), + )) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/spanned.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/spanned.rs new file mode 100644 index 000000000000..7ce58640aeb7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/spanned.rs @@ -0,0 +1,70 @@ +use serde::de::value::BorrowedStrDeserializer; +use serde::de::IntoDeserializer as _; + +use super::Error; + +pub(crate) struct SpannedDeserializer<'de, T: serde::de::IntoDeserializer<'de, Error>> { + phantom_data: std::marker::PhantomData<&'de ()>, + start: Option, + end: Option, + value: Option, +} + +impl<'de, T> SpannedDeserializer<'de, T> +where + T: serde::de::IntoDeserializer<'de, Error>, +{ + pub(crate) fn new(value: T, span: std::ops::Range) -> Self { + Self { + phantom_data: Default::default(), + start: Some(span.start), + end: Some(span.end), + value: Some(value), + } + } +} + +impl<'de, T> serde::de::MapAccess<'de> for SpannedDeserializer<'de, T> +where + T: serde::de::IntoDeserializer<'de, Error>, +{ + type Error = Error; + fn next_key_seed(&mut self, seed: K) -> Result, Error> + where + K: serde::de::DeserializeSeed<'de>, + { + if self.start.is_some() { + seed.deserialize(BorrowedStrDeserializer::new( + serde_spanned::__unstable::START_FIELD, + )) + .map(Some) + } else if self.end.is_some() { + seed.deserialize(BorrowedStrDeserializer::new( + serde_spanned::__unstable::END_FIELD, + )) + .map(Some) + } else if self.value.is_some() { + seed.deserialize(BorrowedStrDeserializer::new( + serde_spanned::__unstable::VALUE_FIELD, + )) + .map(Some) + } else { + Ok(None) + } + } + + fn next_value_seed(&mut self, seed: V) -> Result + where + V: serde::de::DeserializeSeed<'de>, + { + if let Some(start) = self.start.take() { + seed.deserialize(start.into_deserializer()) + } else if let Some(end) = self.end.take() { + seed.deserialize(end.into_deserializer()) + } else if let Some(value) = self.value.take() { + seed.deserialize(value.into_deserializer()) + } else { + panic!("next_value_seed called before next_key_seed") + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/table.rs 
b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/table.rs new file mode 100644 index 000000000000..436d17a3da44 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/table.rs @@ -0,0 +1,214 @@ +use serde::de::IntoDeserializer; + +use crate::de::Error; + +pub(crate) struct TableDeserializer { + span: Option>, + items: crate::table::KeyValuePairs, +} + +// Note: this is wrapped by `Deserializer` and `ValueDeserializer` and any trait methods +// implemented here need to be wrapped there +impl<'de> serde::Deserializer<'de> for TableDeserializer { + type Error = Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + visitor.visit_map(TableMapAccess::new(self)) + } + + // `None` is interpreted as a missing field so be sure to implement `Some` + // as a present field. + fn deserialize_option(self, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + visitor.visit_some(self) + } + + fn deserialize_newtype_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + visitor.visit_newtype_struct(self) + } + + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + if serde_spanned::__unstable::is_spanned(name, fields) { + if let Some(span) = self.span.clone() { + return visitor.visit_map(super::SpannedDeserializer::new(self, span)); + } + } + + self.deserialize_any(visitor) + } + + // Called when the type to deserialize is an enum, as opposed to a field in the type. + fn deserialize_enum( + self, + _name: &'static str, + _variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + if self.items.is_empty() { + Err(Error::custom( + "wanted exactly 1 element, found 0 elements", + self.span, + )) + } else if self.items.len() != 1 { + Err(Error::custom( + "wanted exactly 1 element, more than 1 element", + self.span, + )) + } else { + visitor.visit_enum(TableMapAccess::new(self)) + } + } + + serde::forward_to_deserialize_any! 
{ + bool u8 u16 u32 u64 i8 i16 i32 i64 f32 f64 char str string seq + bytes byte_buf map unit + ignored_any unit_struct tuple_struct tuple identifier + } +} + +impl<'de> IntoDeserializer<'de, Error> for TableDeserializer { + type Deserializer = TableDeserializer; + + fn into_deserializer(self) -> Self::Deserializer { + self + } +} + +impl crate::Table { + pub(crate) fn into_deserializer(self) -> TableDeserializer { + TableDeserializer { + span: self.span(), + items: self.items, + } + } +} + +impl crate::InlineTable { + pub(crate) fn into_deserializer(self) -> TableDeserializer { + TableDeserializer { + span: self.span(), + items: self.items, + } + } +} + +pub(crate) struct TableMapAccess { + iter: indexmap::map::IntoIter, + span: Option>, + value: Option<(crate::Key, crate::Item)>, +} + +impl TableMapAccess { + pub(crate) fn new(input: TableDeserializer) -> Self { + Self { + iter: input.items.into_iter(), + span: input.span, + value: None, + } + } +} + +impl<'de> serde::de::MapAccess<'de> for TableMapAccess { + type Error = Error; + + fn next_key_seed(&mut self, seed: K) -> Result, Self::Error> + where + K: serde::de::DeserializeSeed<'de>, + { + match self.iter.next() { + Some((k, v)) => { + let key_span = k.span(); + let ret = seed + .deserialize(super::KeyDeserializer::new(k.clone(), key_span.clone())) + .map(Some) + .map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(key_span); + } + e + }); + self.value = Some((k, v)); + ret + } + None => Ok(None), + } + } + + fn next_value_seed(&mut self, seed: V) -> Result + where + V: serde::de::DeserializeSeed<'de>, + { + match self.value.take() { + Some((k, v)) => { + let span = v.span().or_else(|| k.span()); + seed.deserialize(crate::de::ValueDeserializer::new(v)) + .map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(span); + } + e.add_key(k.get().to_owned()); + e + }) + } + None => { + panic!("no more values in next_value_seed, internal error in ValueDeserializer") + } + } + } +} + +impl<'de> serde::de::EnumAccess<'de> for TableMapAccess { + type Error = Error; + type Variant = super::TableEnumDeserializer; + + fn variant_seed(mut self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error> + where + V: serde::de::DeserializeSeed<'de>, + { + let (key, value) = match self.iter.next() { + Some(pair) => pair, + None => { + return Err(Error::custom( + "expected table with exactly 1 entry, found empty table", + self.span, + )); + } + }; + + let val = seed + .deserialize(key.into_deserializer()) + .map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(key.span()); + } + e + })?; + + let variant = super::TableEnumDeserializer::new(value); + + Ok((val, variant)) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/table_enum.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/table_enum.rs new file mode 100644 index 000000000000..8c3433f419a0 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/table_enum.rs @@ -0,0 +1,176 @@ +use crate::de::Error; + +/// Deserializes table values into enum variants. 
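For orientation, a minimal sketch of what this enum support provides when driven through the public `toml_edit::de::from_str` entry point. Illustrative only, not part of the vendored sources; it assumes the `parse` and `serde` features are enabled, and the `Endpoint`/`Config` types are made up for the example.

// Sketch: a one-entry TOML table selects an externally tagged enum variant.
use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
enum Endpoint {
    Unix { path: String },
    Tcp { host: String, port: u16 },
}

#[derive(Debug, Deserialize)]
struct Config {
    endpoint: Endpoint,
}

fn main() -> Result<(), toml_edit::de::Error> {
    // The `endpoint` table has exactly one entry; its key names the variant.
    let cfg: Config = toml_edit::de::from_str(
        "[endpoint.Tcp]\nhost = \"localhost\"\nport = 8080\n",
    )?;
    assert_eq!(
        cfg.endpoint,
        Endpoint::Tcp { host: "localhost".into(), port: 8080 }
    );
    Ok(())
}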
+pub(crate) struct TableEnumDeserializer { + value: crate::Item, +} + +impl TableEnumDeserializer { + pub(crate) fn new(value: crate::Item) -> Self { + TableEnumDeserializer { value } + } +} + +impl<'de> serde::de::VariantAccess<'de> for TableEnumDeserializer { + type Error = Error; + + fn unit_variant(self) -> Result<(), Self::Error> { + match self.value { + crate::Item::ArrayOfTables(values) => { + if values.is_empty() { + Ok(()) + } else { + Err(Error::custom("expected empty array", values.span())) + } + } + crate::Item::Value(crate::Value::Array(values)) => { + if values.is_empty() { + Ok(()) + } else { + Err(Error::custom("expected empty table", values.span())) + } + } + crate::Item::Table(values) => { + if values.is_empty() { + Ok(()) + } else { + Err(Error::custom("expected empty table", values.span())) + } + } + crate::Item::Value(crate::Value::InlineTable(values)) => { + if values.is_empty() { + Ok(()) + } else { + Err(Error::custom("expected empty table", values.span())) + } + } + e => Err(Error::custom( + format!("expected table, found {}", e.type_name()), + e.span(), + )), + } + } + + fn newtype_variant_seed(self, seed: T) -> Result + where + T: serde::de::DeserializeSeed<'de>, + { + seed.deserialize(super::ValueDeserializer::new(self.value)) + } + + fn tuple_variant(self, len: usize, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + match self.value { + crate::Item::ArrayOfTables(values) => { + let values_span = values.span(); + let tuple_values = values.values.into_iter().collect::>(); + + if tuple_values.len() == len { + serde::de::Deserializer::deserialize_seq( + super::ArrayDeserializer::new(tuple_values, values_span), + visitor, + ) + } else { + Err(Error::custom( + format!("expected tuple with length {}", len), + values_span, + )) + } + } + crate::Item::Value(crate::Value::Array(values)) => { + let values_span = values.span(); + let tuple_values = values.values.into_iter().collect::>(); + + if tuple_values.len() == len { + serde::de::Deserializer::deserialize_seq( + super::ArrayDeserializer::new(tuple_values, values_span), + visitor, + ) + } else { + Err(Error::custom( + format!("expected tuple with length {}", len), + values_span, + )) + } + } + crate::Item::Table(values) => { + let values_span = values.span(); + let tuple_values: Result, _> = values + .items + .into_iter() + .enumerate() + .map(|(index, (key, value))| match key.get().parse::() { + Ok(key_index) if key_index == index => Ok(value), + Ok(_) | Err(_) => Err(Error::custom( + format!("expected table key `{}`, but was `{}`", index, key.get()), + key.span(), + )), + }) + .collect(); + let tuple_values = tuple_values?; + + if tuple_values.len() == len { + serde::de::Deserializer::deserialize_seq( + super::ArrayDeserializer::new(tuple_values, values_span), + visitor, + ) + } else { + Err(Error::custom( + format!("expected tuple with length {}", len), + values_span, + )) + } + } + crate::Item::Value(crate::Value::InlineTable(values)) => { + let values_span = values.span(); + let tuple_values: Result, _> = values + .items + .into_iter() + .enumerate() + .map(|(index, (key, value))| match key.get().parse::() { + Ok(key_index) if key_index == index => Ok(value), + Ok(_) | Err(_) => Err(Error::custom( + format!("expected table key `{}`, but was `{}`", index, key.get()), + key.span(), + )), + }) + .collect(); + let tuple_values = tuple_values?; + + if tuple_values.len() == len { + serde::de::Deserializer::deserialize_seq( + super::ArrayDeserializer::new(tuple_values, values_span), + visitor, + ) + } 
else { + Err(Error::custom( + format!("expected tuple with length {}", len), + values_span, + )) + } + } + e => Err(Error::custom( + format!("expected table, found {}", e.type_name()), + e.span(), + )), + } + } + + fn struct_variant( + self, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + serde::de::Deserializer::deserialize_struct( + super::ValueDeserializer::new(self.value).with_struct_key_validation(), + "", // TODO: this should be the variant name + fields, + visitor, + ) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/value.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/value.rs new file mode 100644 index 000000000000..d7fb4a4429d7 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/de/value.rs @@ -0,0 +1,257 @@ +use serde::de::IntoDeserializer as _; + +use crate::de::DatetimeDeserializer; +use crate::de::Error; + +/// Deserialization implementation for TOML [values][crate::Value]. +/// +/// Can be created either directly from TOML strings, using [`std::str::FromStr`], +/// or from parsed [values][crate::Value] using [`serde::de::IntoDeserializer::into_deserializer`]. +/// +/// # Example +/// +/// ``` +/// # #[cfg(feature = "parse")] { +/// # #[cfg(feature = "display")] { +/// use serde::Deserialize; +/// +/// #[derive(Deserialize)] +/// struct Config { +/// title: String, +/// owner: Owner, +/// } +/// +/// #[derive(Deserialize)] +/// struct Owner { +/// name: String, +/// } +/// +/// let value = r#"{ title = 'TOML Example', owner = { name = 'Lisa' } }"#; +/// let deserializer = value.parse::().unwrap(); +/// let config = Config::deserialize(deserializer).unwrap(); +/// assert_eq!(config.title, "TOML Example"); +/// assert_eq!(config.owner.name, "Lisa"); +/// # } +/// # } +/// ``` +pub struct ValueDeserializer { + input: crate::Item, + validate_struct_keys: bool, +} + +impl ValueDeserializer { + pub(crate) fn new(input: crate::Item) -> Self { + Self { + input, + validate_struct_keys: false, + } + } + + pub(crate) fn with_struct_key_validation(mut self) -> Self { + self.validate_struct_keys = true; + self + } +} + +// Note: this is wrapped by `toml::de::ValueDeserializer` and any trait methods +// implemented here need to be wrapped there +impl<'de> serde::Deserializer<'de> for ValueDeserializer { + type Error = Error; + + fn deserialize_any(self, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + let span = self.input.span(); + match self.input { + crate::Item::None => visitor.visit_none(), + crate::Item::Value(crate::Value::String(v)) => visitor.visit_string(v.into_value()), + crate::Item::Value(crate::Value::Integer(v)) => visitor.visit_i64(v.into_value()), + crate::Item::Value(crate::Value::Float(v)) => visitor.visit_f64(v.into_value()), + crate::Item::Value(crate::Value::Boolean(v)) => visitor.visit_bool(v.into_value()), + crate::Item::Value(crate::Value::Datetime(v)) => { + visitor.visit_map(DatetimeDeserializer::new(v.into_value())) + } + crate::Item::Value(crate::Value::Array(v)) => { + v.into_deserializer().deserialize_any(visitor) + } + crate::Item::Value(crate::Value::InlineTable(v)) => { + v.into_deserializer().deserialize_any(visitor) + } + crate::Item::Table(v) => v.into_deserializer().deserialize_any(visitor), + crate::Item::ArrayOfTables(v) => v.into_deserializer().deserialize_any(visitor), + } + .map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(span); + } + e + }) + } + + // 
`None` is interpreted as a missing field so be sure to implement `Some` + // as a present field. + fn deserialize_option(self, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, + { + let span = self.input.span(); + visitor.visit_some(self).map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(span); + } + e + }) + } + + fn deserialize_newtype_struct( + self, + _name: &'static str, + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + let span = self.input.span(); + visitor + .visit_newtype_struct(self) + .map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(span); + } + e + }) + } + + fn deserialize_struct( + self, + name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + if serde_spanned::__unstable::is_spanned(name, fields) { + if let Some(span) = self.input.span() { + return visitor.visit_map(super::SpannedDeserializer::new(self, span)); + } + } + + if name == toml_datetime::__unstable::NAME && fields == [toml_datetime::__unstable::FIELD] { + let span = self.input.span(); + if let crate::Item::Value(crate::Value::Datetime(d)) = self.input { + return visitor + .visit_map(DatetimeDeserializer::new(d.into_value())) + .map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(span); + } + e + }); + } + } + + if self.validate_struct_keys { + let span = self.input.span(); + match &self.input { + crate::Item::Table(values) => super::validate_struct_keys(&values.items, fields), + crate::Item::Value(crate::Value::InlineTable(values)) => { + super::validate_struct_keys(&values.items, fields) + } + _ => Ok(()), + } + .map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(span); + } + e + })?; + } + + self.deserialize_any(visitor) + } + + // Called when the type to deserialize is an enum, as opposed to a field in the type. + fn deserialize_enum( + self, + name: &'static str, + variants: &'static [&'static str], + visitor: V, + ) -> Result + where + V: serde::de::Visitor<'de>, + { + let span = self.input.span(); + match self.input { + crate::Item::Value(crate::Value::String(v)) => { + visitor.visit_enum(v.into_value().into_deserializer()) + } + crate::Item::Value(crate::Value::InlineTable(v)) => { + if v.is_empty() { + Err(Error::custom( + "wanted exactly 1 element, found 0 elements", + v.span(), + )) + } else if v.len() != 1 { + Err(Error::custom( + "wanted exactly 1 element, more than 1 element", + v.span(), + )) + } else { + v.into_deserializer() + .deserialize_enum(name, variants, visitor) + } + } + crate::Item::Table(v) => v + .into_deserializer() + .deserialize_enum(name, variants, visitor), + e => Err(Error::custom("wanted string or table", e.span())), + } + .map_err(|mut e: Self::Error| { + if e.span().is_none() { + e.set_span(span); + } + e + }) + } + + serde::forward_to_deserialize_any! 
{ + bool u8 u16 u32 u64 i8 i16 i32 i64 f32 f64 char str string seq + bytes byte_buf map unit + ignored_any unit_struct tuple_struct tuple identifier + } +} + +impl<'de> serde::de::IntoDeserializer<'de, Error> for ValueDeserializer { + type Deserializer = Self; + + fn into_deserializer(self) -> Self::Deserializer { + self + } +} + +impl<'de> serde::de::IntoDeserializer<'de, Error> for crate::Value { + type Deserializer = ValueDeserializer; + + fn into_deserializer(self) -> Self::Deserializer { + ValueDeserializer::new(crate::Item::Value(self)) + } +} + +impl crate::Item { + pub(crate) fn into_deserializer(self) -> ValueDeserializer { + ValueDeserializer::new(self) + } +} + +#[cfg(feature = "parse")] +impl std::str::FromStr for ValueDeserializer { + type Err = Error; + + /// Parses a value from a &str + fn from_str(s: &str) -> Result { + let v = crate::parser::parse_value(s).map_err(Error::from)?; + Ok(v.into_deserializer()) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/document.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/document.rs new file mode 100644 index 000000000000..40edf76d1832 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/document.rs @@ -0,0 +1,211 @@ +use std::str::FromStr; + +use crate::table::Iter; +use crate::{Item, RawString, Table}; + +/// Type representing a parsed TOML document +#[derive(Debug, Clone)] +pub struct ImDocument { + pub(crate) root: Item, + // Trailing comments and whitespaces + pub(crate) trailing: RawString, + pub(crate) raw: S, +} + +impl ImDocument<&'static str> { + /// Creates an empty document + pub fn new() -> Self { + Default::default() + } +} + +#[cfg(feature = "parse")] +impl> ImDocument { + /// Parse a TOML document + pub fn parse(raw: S) -> Result { + crate::parser::parse_document(raw) + } +} + +impl> ImDocument { + /// # Panics + /// + /// If run on on a [`DocumentMut`] not generated by the parser + pub(crate) fn despan(&mut self) { + self.root.despan(self.raw.as_ref()); + self.trailing.despan(self.raw.as_ref()); + } +} + +impl ImDocument { + /// Returns a reference to the root item. + pub fn as_item(&self) -> &Item { + &self.root + } + + /// Returns a reference to the root table. + pub fn as_table(&self) -> &Table { + self.root.as_table().expect("root should always be a table") + } + + /// Returns an iterator over the root table. 
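For orientation, a minimal sketch of the read-then-edit workflow this type enables: parse into the immutable, span-carrying `ImDocument`, then convert into a `DocumentMut` for format-preserving edits. Illustrative only, not part of the vendored sources; it assumes the `parse` and `display` features and uses the crate's `value()` helper defined elsewhere.

use toml_edit::{DocumentMut, ImDocument};

fn main() -> Result<(), toml_edit::TomlError> {
    let src = "# comment\n[package]\nname = \"demo\"\n";

    // The read-only view keeps spans into, and access to, the original input.
    let im: ImDocument<&str> = ImDocument::parse(src)?;
    assert_eq!(im.raw(), src);

    // Editing requires despanning into a `DocumentMut`.
    let mut doc: DocumentMut = im.into_mut();
    doc["package"]["name"] = toml_edit::value("renamed");
    // Comments and layout from `src` are preserved in the output.
    println!("{doc}");
    Ok(())
}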
+ pub fn iter(&self) -> Iter<'_> { + self.as_table().iter() + } + + /// Whitespace after last element + pub fn trailing(&self) -> &RawString { + &self.trailing + } +} + +impl> ImDocument { + /// Access the raw, unparsed document + pub fn raw(&self) -> &str { + self.raw.as_ref() + } +} + +impl> ImDocument { + /// Allow editing of the [`DocumentMut`] + pub fn into_mut(mut self) -> DocumentMut { + self.despan(); + DocumentMut { + root: self.root, + trailing: self.trailing, + } + } +} + +impl Default for ImDocument<&'static str> { + fn default() -> Self { + Self { + root: Item::Table(Table::with_pos(Some(0))), + trailing: Default::default(), + raw: "", + } + } +} + +#[cfg(feature = "parse")] +impl FromStr for ImDocument { + type Err = crate::TomlError; + + /// Parses a document from a &str + fn from_str(s: &str) -> Result { + Self::parse(s.to_owned()) + } +} + +impl std::ops::Deref for ImDocument { + type Target = Table; + + fn deref(&self) -> &Self::Target { + self.as_table() + } +} + +/// Type representing a TOML document +#[derive(Debug, Clone)] +pub struct DocumentMut { + pub(crate) root: Item, + // Trailing comments and whitespaces + pub(crate) trailing: RawString, +} + +impl DocumentMut { + /// Creates an empty document + pub fn new() -> Self { + Default::default() + } + + /// Returns a reference to the root item. + pub fn as_item(&self) -> &Item { + &self.root + } + + /// Returns a mutable reference to the root item. + pub fn as_item_mut(&mut self) -> &mut Item { + &mut self.root + } + + /// Returns a reference to the root table. + pub fn as_table(&self) -> &Table { + self.root.as_table().expect("root should always be a table") + } + + /// Returns a mutable reference to the root table. + pub fn as_table_mut(&mut self) -> &mut Table { + self.root + .as_table_mut() + .expect("root should always be a table") + } + + /// Returns an iterator over the root table. + pub fn iter(&self) -> Iter<'_> { + self.as_table().iter() + } + + /// Set whitespace after last element + pub fn set_trailing(&mut self, trailing: impl Into) { + self.trailing = trailing.into(); + } + + /// Whitespace after last element + pub fn trailing(&self) -> &RawString { + &self.trailing + } +} + +impl Default for DocumentMut { + fn default() -> Self { + Self { + root: Item::Table(Table::with_pos(Some(0))), + trailing: Default::default(), + } + } +} + +#[cfg(feature = "parse")] +impl FromStr for DocumentMut { + type Err = crate::TomlError; + + /// Parses a document from a &str + fn from_str(s: &str) -> Result { + let im = ImDocument::from_str(s)?; + Ok(im.into_mut()) + } +} + +impl std::ops::Deref for DocumentMut { + type Target = Table; + + fn deref(&self) -> &Self::Target { + self.as_table() + } +} + +impl std::ops::DerefMut for DocumentMut { + fn deref_mut(&mut self) -> &mut Self::Target { + self.as_table_mut() + } +} + +impl From
for DocumentMut { + fn from(root: Table) -> Self { + Self { + root: Item::Table(root), + ..Default::default() + } + } +} + +#[test] +#[cfg(feature = "parse")] +#[cfg(feature = "display")] +fn default_roundtrip() { + DocumentMut::default() + .to_string() + .parse::() + .unwrap(); +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/encode.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/encode.rs new file mode 100644 index 000000000000..54319c4ae841 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/encode.rs @@ -0,0 +1,597 @@ +use std::borrow::Cow; +use std::fmt::{Display, Formatter, Result, Write}; + +use toml_datetime::Datetime; + +use crate::inline_table::DEFAULT_INLINE_KEY_DECOR; +use crate::key::Key; +use crate::repr::{Formatted, Repr, ValueRepr}; +use crate::table::{ + DEFAULT_KEY_DECOR, DEFAULT_KEY_PATH_DECOR, DEFAULT_ROOT_DECOR, DEFAULT_TABLE_DECOR, +}; +use crate::value::{ + DEFAULT_LEADING_VALUE_DECOR, DEFAULT_TRAILING_VALUE_DECOR, DEFAULT_VALUE_DECOR, +}; +use crate::DocumentMut; +use crate::{Array, InlineTable, Item, Table, Value}; + +pub(crate) fn encode_key(this: &Key, buf: &mut dyn Write, input: Option<&str>) -> Result { + if let Some(input) = input { + let repr = this + .as_repr() + .map(Cow::Borrowed) + .unwrap_or_else(|| Cow::Owned(this.default_repr())); + repr.encode(buf, input)?; + } else { + let repr = this.display_repr(); + write!(buf, "{}", repr)?; + }; + + Ok(()) +} + +fn encode_key_path( + this: &[Key], + buf: &mut dyn Write, + input: Option<&str>, + default_decor: (&str, &str), +) -> Result { + let leaf_decor = this.last().expect("always at least one key").leaf_decor(); + for (i, key) in this.iter().enumerate() { + let dotted_decor = key.dotted_decor(); + + let first = i == 0; + let last = i + 1 == this.len(); + + if first { + leaf_decor.prefix_encode(buf, input, default_decor.0)?; + } else { + write!(buf, ".")?; + dotted_decor.prefix_encode(buf, input, DEFAULT_KEY_PATH_DECOR.0)?; + } + + encode_key(key, buf, input)?; + + if last { + leaf_decor.suffix_encode(buf, input, default_decor.1)?; + } else { + dotted_decor.suffix_encode(buf, input, DEFAULT_KEY_PATH_DECOR.1)?; + } + } + Ok(()) +} + +pub(crate) fn encode_key_path_ref( + this: &[&Key], + buf: &mut dyn Write, + input: Option<&str>, + default_decor: (&str, &str), +) -> Result { + let leaf_decor = this.last().expect("always at least one key").leaf_decor(); + for (i, key) in this.iter().enumerate() { + let dotted_decor = key.dotted_decor(); + + let first = i == 0; + let last = i + 1 == this.len(); + + if first { + leaf_decor.prefix_encode(buf, input, default_decor.0)?; + } else { + write!(buf, ".")?; + dotted_decor.prefix_encode(buf, input, DEFAULT_KEY_PATH_DECOR.0)?; + } + + encode_key(key, buf, input)?; + + if last { + leaf_decor.suffix_encode(buf, input, default_decor.1)?; + } else { + dotted_decor.suffix_encode(buf, input, DEFAULT_KEY_PATH_DECOR.1)?; + } + } + Ok(()) +} + +pub(crate) fn encode_formatted( + this: &Formatted, + buf: &mut dyn Write, + input: Option<&str>, + default_decor: (&str, &str), +) -> Result { + let decor = this.decor(); + decor.prefix_encode(buf, input, default_decor.0)?; + + if let Some(input) = input { + let repr = this + .as_repr() + .map(Cow::Borrowed) + .unwrap_or_else(|| Cow::Owned(this.default_repr())); + repr.encode(buf, input)?; + } else { + let repr = this.display_repr(); + write!(buf, "{}", repr)?; + }; + + decor.suffix_encode(buf, input, default_decor.1)?; + Ok(()) +} + +pub(crate) fn 
encode_array( + this: &Array, + buf: &mut dyn Write, + input: Option<&str>, + default_decor: (&str, &str), +) -> Result { + let decor = this.decor(); + decor.prefix_encode(buf, input, default_decor.0)?; + write!(buf, "[")?; + + for (i, elem) in this.iter().enumerate() { + let inner_decor; + if i == 0 { + inner_decor = DEFAULT_LEADING_VALUE_DECOR; + } else { + inner_decor = DEFAULT_VALUE_DECOR; + write!(buf, ",")?; + } + encode_value(elem, buf, input, inner_decor)?; + } + if this.trailing_comma() && !this.is_empty() { + write!(buf, ",")?; + } + + this.trailing().encode_with_default(buf, input, "")?; + write!(buf, "]")?; + decor.suffix_encode(buf, input, default_decor.1)?; + + Ok(()) +} + +pub(crate) fn encode_table( + this: &InlineTable, + buf: &mut dyn Write, + input: Option<&str>, + default_decor: (&str, &str), +) -> Result { + let decor = this.decor(); + decor.prefix_encode(buf, input, default_decor.0)?; + write!(buf, "{{")?; + this.preamble().encode_with_default(buf, input, "")?; + + let children = this.get_values(); + let len = children.len(); + for (i, (key_path, value)) in children.into_iter().enumerate() { + if i != 0 { + write!(buf, ",")?; + } + let inner_decor = if i == len - 1 { + DEFAULT_TRAILING_VALUE_DECOR + } else { + DEFAULT_VALUE_DECOR + }; + encode_key_path_ref(&key_path, buf, input, DEFAULT_INLINE_KEY_DECOR)?; + write!(buf, "=")?; + encode_value(value, buf, input, inner_decor)?; + } + + write!(buf, "}}")?; + decor.suffix_encode(buf, input, default_decor.1)?; + + Ok(()) +} + +pub(crate) fn encode_value( + this: &Value, + buf: &mut dyn Write, + input: Option<&str>, + default_decor: (&str, &str), +) -> Result { + match this { + Value::String(repr) => encode_formatted(repr, buf, input, default_decor), + Value::Integer(repr) => encode_formatted(repr, buf, input, default_decor), + Value::Float(repr) => encode_formatted(repr, buf, input, default_decor), + Value::Boolean(repr) => encode_formatted(repr, buf, input, default_decor), + Value::Datetime(repr) => encode_formatted(repr, buf, input, default_decor), + Value::Array(array) => encode_array(array, buf, input, default_decor), + Value::InlineTable(table) => encode_table(table, buf, input, default_decor), + } +} + +impl Display for DocumentMut { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + let decor = self.decor(); + decor.prefix_encode(f, None, DEFAULT_ROOT_DECOR.0)?; + + let mut path = Vec::new(); + let mut last_position = 0; + let mut tables = Vec::new(); + visit_nested_tables(self.as_table(), &mut path, false, &mut |t, p, is_array| { + if let Some(pos) = t.position() { + last_position = pos; + } + tables.push((last_position, t, p.clone(), is_array)); + Ok(()) + }) + .unwrap(); + + tables.sort_by_key(|&(id, _, _, _)| id); + let mut first_table = true; + for (_, table, path, is_array) in tables { + visit_table(f, None, table, &path, is_array, &mut first_table)?; + } + decor.suffix_encode(f, None, DEFAULT_ROOT_DECOR.1)?; + self.trailing().encode_with_default(f, None, "") + } +} + +fn visit_nested_tables<'t, F>( + table: &'t Table, + path: &mut Vec, + is_array_of_tables: bool, + callback: &mut F, +) -> Result +where + F: FnMut(&'t Table, &Vec, bool) -> Result, +{ + if !table.is_dotted() { + callback(table, path, is_array_of_tables)?; + } + + for (key, value) in table.items.iter() { + match value { + Item::Table(ref t) => { + let key = key.clone(); + path.push(key); + visit_nested_tables(t, path, false, callback)?; + path.pop(); + } + Item::ArrayOfTables(ref a) => { + for t in a.iter() { + let key = key.clone(); + 
path.push(key); + visit_nested_tables(t, path, true, callback)?; + path.pop(); + } + } + _ => {} + } + } + Ok(()) +} + +fn visit_table( + buf: &mut dyn Write, + input: Option<&str>, + table: &Table, + path: &[Key], + is_array_of_tables: bool, + first_table: &mut bool, +) -> Result { + let children = table.get_values(); + // We are intentionally hiding implicit tables without any tables nested under them (ie + // `table.is_empty()` which is in contrast to `table.get_values().is_empty()`). We are + // trusting the user that an empty implicit table is not semantically meaningful + // + // This allows a user to delete all tables under this implicit table and the implicit table + // will disappear. + // + // However, this means that users need to take care in deciding what tables get marked as + // implicit. + let is_visible_std_table = !(table.implicit && children.is_empty()); + + if path.is_empty() { + // don't print header for the root node + if !children.is_empty() { + *first_table = false; + } + } else if is_array_of_tables { + let default_decor = if *first_table { + *first_table = false; + ("", DEFAULT_TABLE_DECOR.1) + } else { + DEFAULT_TABLE_DECOR + }; + table.decor.prefix_encode(buf, input, default_decor.0)?; + write!(buf, "[[")?; + encode_key_path(path, buf, input, DEFAULT_KEY_PATH_DECOR)?; + write!(buf, "]]")?; + table.decor.suffix_encode(buf, input, default_decor.1)?; + writeln!(buf)?; + } else if is_visible_std_table { + let default_decor = if *first_table { + *first_table = false; + ("", DEFAULT_TABLE_DECOR.1) + } else { + DEFAULT_TABLE_DECOR + }; + table.decor.prefix_encode(buf, input, default_decor.0)?; + write!(buf, "[")?; + encode_key_path(path, buf, input, DEFAULT_KEY_PATH_DECOR)?; + write!(buf, "]")?; + table.decor.suffix_encode(buf, input, default_decor.1)?; + writeln!(buf)?; + } + // print table body + for (key_path, value) in children { + encode_key_path_ref(&key_path, buf, input, DEFAULT_KEY_DECOR)?; + write!(buf, "=")?; + encode_value(value, buf, input, DEFAULT_VALUE_DECOR)?; + writeln!(buf)?; + } + Ok(()) +} + +impl ValueRepr for String { + fn to_repr(&self) -> Repr { + to_string_repr(self, None, None) + } +} + +pub(crate) fn to_string_repr( + value: &str, + style: Option, + literal: Option, +) -> Repr { + let (style, literal) = infer_style(value, style, literal); + + let mut output = String::with_capacity(value.len() * 2); + if literal { + output.push_str(style.literal_start()); + output.push_str(value); + output.push_str(style.literal_end()); + } else { + output.push_str(style.standard_start()); + for ch in value.chars() { + match ch { + '\u{8}' => output.push_str("\\b"), + '\u{9}' => output.push_str("\\t"), + '\u{a}' => match style { + StringStyle::NewlineTriple => output.push('\n'), + StringStyle::OnelineSingle => output.push_str("\\n"), + StringStyle::OnelineTriple => unreachable!(), + }, + '\u{c}' => output.push_str("\\f"), + '\u{d}' => output.push_str("\\r"), + '\u{22}' => output.push_str("\\\""), + '\u{5c}' => output.push_str("\\\\"), + c if c <= '\u{1f}' || c == '\u{7f}' => { + write!(output, "\\u{:04X}", ch as u32).unwrap(); + } + ch => output.push(ch), + } + } + output.push_str(style.standard_end()); + } + + Repr::new_unchecked(output) +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub(crate) enum StringStyle { + NewlineTriple, + OnelineTriple, + OnelineSingle, +} + +impl StringStyle { + fn literal_start(self) -> &'static str { + match self { + Self::NewlineTriple => "'''\n", + Self::OnelineTriple => "'''", + Self::OnelineSingle => "'", + } + } + fn 
literal_end(self) -> &'static str { + match self { + Self::NewlineTriple => "'''", + Self::OnelineTriple => "'''", + Self::OnelineSingle => "'", + } + } + + fn standard_start(self) -> &'static str { + match self { + Self::NewlineTriple => "\"\"\"\n", + // note: OnelineTriple can happen if do_pretty wants to do + // '''it's one line''' + // but literal == false + Self::OnelineTriple | Self::OnelineSingle => "\"", + } + } + + fn standard_end(self) -> &'static str { + match self { + Self::NewlineTriple => "\"\"\"", + // note: OnelineTriple can happen if do_pretty wants to do + // '''it's one line''' + // but literal == false + Self::OnelineTriple | Self::OnelineSingle => "\"", + } + } +} + +fn infer_style( + value: &str, + style: Option, + literal: Option, +) -> (StringStyle, bool) { + match (style, literal) { + (Some(style), Some(literal)) => (style, literal), + (None, Some(literal)) => (infer_all_style(value).0, literal), + (Some(style), None) => { + let literal = infer_literal(value); + (style, literal) + } + (None, None) => infer_all_style(value), + } +} + +fn infer_literal(value: &str) -> bool { + #[cfg(feature = "parse")] + { + use winnow::stream::ContainsToken as _; + (value.contains('"') | value.contains('\\')) + && value + .chars() + .all(|c| crate::parser::strings::LITERAL_CHAR.contains_token(c)) + } + #[cfg(not(feature = "parse"))] + { + false + } +} + +fn infer_all_style(value: &str) -> (StringStyle, bool) { + // We need to determine: + // - if we are a "multi-line" pretty (if there are \n) + // - if ['''] appears if multi or ['] if single + // - if there are any invalid control characters + // + // Doing it any other way would require multiple passes + // to determine if a pretty string works or not. + let mut ty = StringStyle::OnelineSingle; + // found consecutive single quotes + let mut max_found_singles = 0; + let mut found_singles = 0; + let mut prefer_literal = false; + let mut can_be_pretty = true; + + for ch in value.chars() { + if can_be_pretty { + if ch == '\'' { + found_singles += 1; + if found_singles >= 3 { + can_be_pretty = false; + } + } else { + if found_singles > max_found_singles { + max_found_singles = found_singles; + } + found_singles = 0; + } + match ch { + '\t' => {} + '"' => { + prefer_literal = true; + } + '\\' => { + prefer_literal = true; + } + '\n' => ty = StringStyle::NewlineTriple, + // Escape codes are needed if any ascii control + // characters are present, including \b \f \r. 
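The style inference here only decides how a string is quoted, never what it contains. A small sketch that makes the round-trip guarantee observable, mirroring the `parseable_string` property test later in this file. Illustrative only, not part of the vendored sources; it assumes the `parse` and `display` features.

use toml_edit::Value;

fn main() {
    for s in ["plain", "has \"quotes\" and \\backslash", "multi\nline"] {
        let v = Value::from(s);
        let encoded = v.to_string();
        // Whatever quoting style was inferred, re-parsing must recover `s`.
        let reparsed: Value = encoded.parse().expect("encoder output is valid TOML");
        assert_eq!(reparsed.as_str(), Some(s));
        println!("{s:?} -> {encoded}");
    }
}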
+ c if c <= '\u{1f}' || c == '\u{7f}' => can_be_pretty = false, + _ => {} + } + } else { + // the string cannot be represented as pretty, + // still check if it should be multiline + if ch == '\n' { + ty = StringStyle::NewlineTriple; + } + } + } + if found_singles > 0 && value.ends_with('\'') { + // We cannot escape the ending quote so we must use """ + can_be_pretty = false; + } + if !prefer_literal { + can_be_pretty = false; + } + if !can_be_pretty { + debug_assert!(ty != StringStyle::OnelineTriple); + return (ty, false); + } + if found_singles > max_found_singles { + max_found_singles = found_singles; + } + debug_assert!(max_found_singles < 3); + if ty == StringStyle::OnelineSingle && max_found_singles >= 1 { + // no newlines, but must use ''' because it has ' in it + ty = StringStyle::OnelineTriple; + } + (ty, true) +} + +impl ValueRepr for i64 { + fn to_repr(&self) -> Repr { + Repr::new_unchecked(self.to_string()) + } +} + +impl ValueRepr for f64 { + fn to_repr(&self) -> Repr { + to_f64_repr(*self) + } +} + +fn to_f64_repr(f: f64) -> Repr { + let repr = match (f.is_sign_negative(), f.is_nan(), f == 0.0) { + (true, true, _) => "-nan".to_owned(), + (false, true, _) => "nan".to_owned(), + (true, false, true) => "-0.0".to_owned(), + (false, false, true) => "0.0".to_owned(), + (_, false, false) => { + if f % 1.0 == 0.0 { + format!("{}.0", f) + } else { + format!("{}", f) + } + } + }; + Repr::new_unchecked(repr) +} + +impl ValueRepr for bool { + fn to_repr(&self) -> Repr { + Repr::new_unchecked(self.to_string()) + } +} + +impl ValueRepr for Datetime { + fn to_repr(&self) -> Repr { + Repr::new_unchecked(self.to_string()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use proptest::prelude::*; + + proptest! { + #[test] + #[cfg(feature = "parse")] + fn parseable_string(string in "\\PC*") { + let string = Value::from(string); + let encoded = string.to_string(); + let _: Value = encoded.parse().unwrap_or_else(|err| { + panic!("error: {err} + +string: +``` +{string} +``` +") + }); + } + } + + proptest! 
{ + #[test] + #[cfg(feature = "parse")] + fn parseable_key(string in "\\PC*") { + let string = Key::new(string); + let encoded = string.to_string(); + let _: Key = encoded.parse().unwrap_or_else(|err| { + panic!("error: {err} + +string: +``` +{string} +``` +") + }); + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/error.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/error.rs new file mode 100644 index 000000000000..57c21ef92353 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/error.rs @@ -0,0 +1,256 @@ +use std::error::Error as StdError; +use std::fmt::{Display, Formatter, Result}; + +/// Type representing a TOML parse error +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct TomlError { + message: String, + raw: Option, + keys: Vec, + span: Option>, +} + +impl TomlError { + #[cfg(feature = "parse")] + pub(crate) fn new( + error: winnow::error::ParseError< + crate::parser::prelude::Input<'_>, + winnow::error::ContextError, + >, + mut raw: crate::parser::prelude::Input<'_>, + ) -> Self { + use winnow::stream::Stream; + + let message = error.inner().to_string(); + let raw = raw.finish(); + let raw = String::from_utf8(raw.to_owned()).expect("original document was utf8"); + + let offset = error.offset(); + let offset = (0..=offset) + .rev() + .find(|index| raw.is_char_boundary(*index)) + .unwrap_or(0); + + let mut indices = raw[offset..].char_indices(); + indices.next(); + let len = if let Some((index, _)) = indices.next() { + index + } else { + raw.len() - offset + }; + let span = offset..(offset + len); + + Self { + message, + raw: Some(raw), + keys: Vec::new(), + span: Some(span), + } + } + + #[cfg(any(feature = "serde", feature = "parse"))] + pub(crate) fn custom(message: String, span: Option>) -> Self { + Self { + message, + raw: None, + keys: Vec::new(), + span, + } + } + + #[cfg(feature = "serde")] + pub(crate) fn add_key(&mut self, key: String) { + self.keys.insert(0, key); + } + + /// What went wrong + pub fn message(&self) -> &str { + &self.message + } + + /// The start/end index into the original document where the error occurred + pub fn span(&self) -> Option> { + self.span.clone() + } + + #[cfg(feature = "serde")] + pub(crate) fn set_span(&mut self, span: Option>) { + self.span = span; + } + + #[cfg(feature = "serde")] + pub(crate) fn set_raw(&mut self, raw: Option) { + self.raw = raw; + } +} + +/// Displays a TOML parse error +/// +/// # Example +/// +/// TOML parse error at line 1, column 10 +/// | +/// 1 | 00:32:00.a999999 +/// | ^ +/// Unexpected `a` +/// Expected `digit` +/// While parsing a Time +/// While parsing a Date-Time +impl Display for TomlError { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + let mut context = false; + if let (Some(raw), Some(span)) = (&self.raw, self.span()) { + context = true; + + let (line, column) = translate_position(raw.as_bytes(), span.start); + let line_num = line + 1; + let col_num = column + 1; + let gutter = line_num.to_string().len(); + let content = raw.split('\n').nth(line).expect("valid line number"); + let highlight_len = span.end - span.start; + // Allow highlight to go one past the line + let highlight_len = highlight_len.min(content.len().saturating_sub(column)); + + writeln!( + f, + "TOML parse error at line {}, column {}", + line_num, col_num + )?; + // | + for _ in 0..=gutter { + write!(f, " ")?; + } + writeln!(f, "|")?; + + // 1 | 00:32:00.a999999 + write!(f, "{} | ", line_num)?; + writeln!(f, "{}", content)?; + + // 
| ^ + for _ in 0..=gutter { + write!(f, " ")?; + } + write!(f, "|")?; + for _ in 0..=column { + write!(f, " ")?; + } + // The span will be empty at eof, so we need to make sure we always print at least + // one `^` + write!(f, "^")?; + for _ in 1..highlight_len { + write!(f, "^")?; + } + writeln!(f)?; + } + writeln!(f, "{}", self.message)?; + if !context && !self.keys.is_empty() { + writeln!(f, "in `{}`", self.keys.join("."))?; + } + + Ok(()) + } +} + +impl StdError for TomlError { + fn description(&self) -> &'static str { + "TOML parse error" + } +} + +fn translate_position(input: &[u8], index: usize) -> (usize, usize) { + if input.is_empty() { + return (0, index); + } + + let safe_index = index.min(input.len() - 1); + let column_offset = index - safe_index; + let index = safe_index; + + let nl = input[0..index] + .iter() + .rev() + .enumerate() + .find(|(_, b)| **b == b'\n') + .map(|(nl, _)| index - nl - 1); + let line_start = match nl { + Some(nl) => nl + 1, + None => 0, + }; + let line = input[0..line_start].iter().filter(|b| **b == b'\n').count(); + + let column = std::str::from_utf8(&input[line_start..=index]) + .map(|s| s.chars().count() - 1) + .unwrap_or_else(|_| index - line_start); + let column = column + column_offset; + + (line, column) +} + +#[cfg(test)] +mod test_translate_position { + use super::*; + + #[test] + fn empty() { + let input = b""; + let index = 0; + let position = translate_position(&input[..], index); + assert_eq!(position, (0, 0)); + } + + #[test] + fn start() { + let input = b"Hello"; + let index = 0; + let position = translate_position(&input[..], index); + assert_eq!(position, (0, 0)); + } + + #[test] + fn end() { + let input = b"Hello"; + let index = input.len() - 1; + let position = translate_position(&input[..], index); + assert_eq!(position, (0, input.len() - 1)); + } + + #[test] + fn after() { + let input = b"Hello"; + let index = input.len(); + let position = translate_position(&input[..], index); + assert_eq!(position, (0, input.len())); + } + + #[test] + fn first_line() { + let input = b"Hello\nWorld\n"; + let index = 2; + let position = translate_position(&input[..], index); + assert_eq!(position, (0, 2)); + } + + #[test] + fn end_of_line() { + let input = b"Hello\nWorld\n"; + let index = 5; + let position = translate_position(&input[..], index); + assert_eq!(position, (0, 5)); + } + + #[test] + fn start_of_second_line() { + let input = b"Hello\nWorld\n"; + let index = 6; + let position = translate_position(&input[..], index); + assert_eq!(position, (1, 0)); + } + + #[test] + fn second_line() { + let input = b"Hello\nWorld\n"; + let index = 8; + let position = translate_position(&input[..], index); + assert_eq!(position, (1, 2)); + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/index.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/index.rs new file mode 100644 index 000000000000..35dcc1462703 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/index.rs @@ -0,0 +1,142 @@ +use std::ops; + +use crate::key::Key; +use crate::DocumentMut; +use crate::{value, InlineTable, Item, Table, Value}; + +// copied from +// https://github.com/serde-rs/json/blob/master/src/value/index.rs + +pub trait Index: crate::private::Sealed { + #[doc(hidden)] + fn index<'v>(&self, val: &'v Item) -> Option<&'v Item>; + #[doc(hidden)] + fn index_mut<'v>(&self, val: &'v mut Item) -> Option<&'v mut Item>; +} + +impl Index for usize { + fn index<'v>(&self, v: &'v Item) -> Option<&'v Item> { + 
match *v { + Item::ArrayOfTables(ref aot) => aot.values.get(*self), + Item::Value(ref a) if a.is_array() => a.as_array().and_then(|a| a.values.get(*self)), + _ => None, + } + } + fn index_mut<'v>(&self, v: &'v mut Item) -> Option<&'v mut Item> { + match *v { + Item::ArrayOfTables(ref mut vec) => vec.values.get_mut(*self), + Item::Value(ref mut a) => a.as_array_mut().and_then(|a| a.values.get_mut(*self)), + _ => None, + } + } +} + +impl Index for str { + fn index<'v>(&self, v: &'v Item) -> Option<&'v Item> { + match *v { + Item::Table(ref t) => t.get(self), + Item::Value(ref v) => v + .as_inline_table() + .and_then(|t| t.items.get(self)) + .and_then(|value| if !value.is_none() { Some(value) } else { None }), + _ => None, + } + } + fn index_mut<'v>(&self, v: &'v mut Item) -> Option<&'v mut Item> { + if let Item::None = *v { + let mut t = InlineTable::default(); + t.items.insert(Key::new(self), Item::None); + *v = value(Value::InlineTable(t)); + } + match *v { + Item::Table(ref mut t) => Some(t.entry(self).or_insert(Item::None)), + Item::Value(ref mut v) => v + .as_inline_table_mut() + .map(|t| t.items.entry(Key::new(self)).or_insert_with(|| Item::None)), + _ => None, + } + } +} + +impl Index for String { + fn index<'v>(&self, v: &'v Item) -> Option<&'v Item> { + self[..].index(v) + } + fn index_mut<'v>(&self, v: &'v mut Item) -> Option<&'v mut Item> { + self[..].index_mut(v) + } +} + +impl<'a, T: ?Sized> Index for &'a T +where + T: Index, +{ + fn index<'v>(&self, v: &'v Item) -> Option<&'v Item> { + (**self).index(v) + } + fn index_mut<'v>(&self, v: &'v mut Item) -> Option<&'v mut Item> { + (**self).index_mut(v) + } +} + +impl ops::Index for Item +where + I: Index, +{ + type Output = Item; + + fn index(&self, index: I) -> &Item { + index.index(self).expect("index not found") + } +} + +impl ops::IndexMut for Item +where + I: Index, +{ + fn index_mut(&mut self, index: I) -> &mut Item { + index.index_mut(self).expect("index not found") + } +} + +impl<'s> ops::Index<&'s str> for Table { + type Output = Item; + + fn index(&self, key: &'s str) -> &Item { + self.get(key).expect("index not found") + } +} + +impl<'s> ops::IndexMut<&'s str> for Table { + fn index_mut(&mut self, key: &'s str) -> &mut Item { + self.entry(key).or_insert(Item::None) + } +} + +impl<'s> ops::Index<&'s str> for InlineTable { + type Output = Value; + + fn index(&self, key: &'s str) -> &Value { + self.get(key).expect("index not found") + } +} + +impl<'s> ops::IndexMut<&'s str> for InlineTable { + fn index_mut(&mut self, key: &'s str) -> &mut Value { + self.get_mut(key).expect("index not found") + } +} + +impl<'s> ops::Index<&'s str> for DocumentMut { + type Output = Item; + + fn index(&self, key: &'s str) -> &Item { + self.root.index(key) + } +} + +impl<'s> ops::IndexMut<&'s str> for DocumentMut { + fn index_mut(&mut self, key: &'s str) -> &mut Item { + self.root.index_mut(key) + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/inline_table.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/inline_table.rs new file mode 100644 index 000000000000..9b8e9c42aaf5 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/inline_table.rs @@ -0,0 +1,745 @@ +use std::iter::FromIterator; + +use crate::key::Key; +use crate::repr::Decor; +use crate::table::{Iter, IterMut, KeyValuePairs, TableLike}; +use crate::{InternalString, Item, KeyMut, RawString, Table, Value}; + +/// Type representing a TOML inline table, +/// payload of the `Value::InlineTable` variant 
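For orientation, a small sketch of building an inline table with the constructors and accessors defined below. Illustrative only, not part of the vendored sources; it assumes the `display` feature and the crate's `value()` helper (defined elsewhere) to wrap the table into an `Item`.

use toml_edit::{DocumentMut, InlineTable, Value};

fn main() {
    let mut table = InlineTable::new();
    table.insert("host", Value::from("localhost"));
    // `get_or_insert` only inserts when the key is absent.
    table.get_or_insert("port", 8080i64);

    let mut doc = DocumentMut::new();
    doc["server"] = toml_edit::value(table);
    // Renders roughly as: server = { host = "localhost", port = 8080 }
    println!("{doc}");
}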
+#[derive(Debug, Default, Clone)] +pub struct InlineTable { + // `preamble` represents whitespaces in an empty table + preamble: RawString, + // Whether to hide an empty table + pub(crate) implicit: bool, + // prefix before `{` and suffix after `}` + decor: Decor, + pub(crate) span: Option>, + // whether this is a proxy for dotted keys + dotted: bool, + pub(crate) items: KeyValuePairs, +} + +/// Constructors +/// +/// See also `FromIterator` +impl InlineTable { + /// Creates an empty table. + pub fn new() -> Self { + Default::default() + } + + pub(crate) fn with_pairs(items: KeyValuePairs) -> Self { + Self { + items, + ..Default::default() + } + } + + /// Convert to a table + pub fn into_table(self) -> Table { + let mut t = Table::with_pairs(self.items); + t.fmt(); + t + } +} + +/// Formatting +impl InlineTable { + /// Get key/values for values that are visually children of this table + /// + /// For example, this will return dotted keys + pub fn get_values(&self) -> Vec<(Vec<&Key>, &Value)> { + let mut values = Vec::new(); + let root = Vec::new(); + self.append_values(&root, &mut values); + values + } + + pub(crate) fn append_values<'s>( + &'s self, + parent: &[&'s Key], + values: &mut Vec<(Vec<&'s Key>, &'s Value)>, + ) { + for (key, value) in self.items.iter() { + let mut path = parent.to_vec(); + path.push(key); + match value { + Item::Value(Value::InlineTable(table)) if table.is_dotted() => { + table.append_values(&path, values); + } + Item::Value(value) => { + values.push((path, value)); + } + _ => {} + } + } + } + + /// Auto formats the table. + pub fn fmt(&mut self) { + decorate_inline_table(self); + } + + /// Sorts the key/value pairs by key. + pub fn sort_values(&mut self) { + // Assuming standard tables have their position set and this won't negatively impact them + self.items.sort_keys(); + for value in self.items.values_mut() { + match value { + Item::Value(Value::InlineTable(table)) if table.is_dotted() => { + table.sort_values(); + } + _ => {} + } + } + } + + /// Sort Key/Value Pairs of the table using the using the comparison function `compare`. + /// + /// The comparison function receives two key and value pairs to compare (you can sort by keys or + /// values or their combination as needed). + pub fn sort_values_by(&mut self, mut compare: F) + where + F: FnMut(&Key, &Value, &Key, &Value) -> std::cmp::Ordering, + { + self.sort_values_by_internal(&mut compare); + } + + fn sort_values_by_internal(&mut self, compare: &mut F) + where + F: FnMut(&Key, &Value, &Key, &Value) -> std::cmp::Ordering, + { + let modified_cmp = + |key1: &Key, val1: &Item, key2: &Key, val2: &Item| -> std::cmp::Ordering { + match (val1.as_value(), val2.as_value()) { + (Some(v1), Some(v2)) => compare(key1, v1, key2, v2), + (Some(_), None) => std::cmp::Ordering::Greater, + (None, Some(_)) => std::cmp::Ordering::Less, + (None, None) => std::cmp::Ordering::Equal, + } + }; + + self.items.sort_by(modified_cmp); + for value in self.items.values_mut() { + match value { + Item::Value(Value::InlineTable(table)) if table.is_dotted() => { + table.sort_values_by_internal(compare); + } + _ => {} + } + } + } + + /// If a table has no key/value pairs and implicit, it will not be displayed. + /// + /// # Examples + /// + /// ```notrust + /// [target."x86_64/windows.json".dependencies] + /// ``` + /// + /// In the document above, tables `target` and `target."x86_64/windows.json"` are implicit. 
+ /// + /// ``` + /// # #[cfg(feature = "parse")] { + /// # #[cfg(feature = "display")] { + /// use toml_edit::DocumentMut; + /// let mut doc = "[a]\n[a.b]\n".parse::().expect("invalid toml"); + /// + /// doc["a"].as_table_mut().unwrap().set_implicit(true); + /// assert_eq!(doc.to_string(), "[a.b]\n"); + /// # } + /// # } + /// ``` + pub(crate) fn set_implicit(&mut self, implicit: bool) { + self.implicit = implicit; + } + + /// If a table has no key/value pairs and implicit, it will not be displayed. + pub(crate) fn is_implicit(&self) -> bool { + self.implicit + } + + /// Change this table's dotted status + pub fn set_dotted(&mut self, yes: bool) { + self.dotted = yes; + } + + /// Check if this is a wrapper for dotted keys, rather than a standard table + pub fn is_dotted(&self) -> bool { + self.dotted + } + + /// Returns the surrounding whitespace + pub fn decor_mut(&mut self) -> &mut Decor { + &mut self.decor + } + + /// Returns the surrounding whitespace + pub fn decor(&self) -> &Decor { + &self.decor + } + + /// Returns an accessor to a key's formatting + pub fn key(&self, key: &str) -> Option<&'_ Key> { + self.items.get_full(key).map(|(_, key, _)| key) + } + + /// Returns an accessor to a key's formatting + pub fn key_mut(&mut self, key: &str) -> Option> { + use indexmap::map::MutableKeys; + self.items + .get_full_mut2(key) + .map(|(_, key, _)| key.as_mut()) + } + + /// Returns the decor associated with a given key of the table. + #[deprecated(since = "0.21.1", note = "Replaced with `key_mut`")] + pub fn key_decor_mut(&mut self, key: &str) -> Option<&mut Decor> { + #![allow(deprecated)] + use indexmap::map::MutableKeys; + self.items + .get_full_mut2(key) + .map(|(_, key, _)| key.leaf_decor_mut()) + } + + /// Returns the decor associated with a given key of the table. + #[deprecated(since = "0.21.1", note = "Replaced with `key_mut`")] + pub fn key_decor(&self, key: &str) -> Option<&Decor> { + #![allow(deprecated)] + self.items.get_full(key).map(|(_, key, _)| key.leaf_decor()) + } + + /// Set whitespace after before element + pub fn set_preamble(&mut self, preamble: impl Into) { + self.preamble = preamble.into(); + } + + /// Whitespace after before element + pub fn preamble(&self) -> &RawString { + &self.preamble + } + + /// The location within the original document + /// + /// This generally requires an [`ImDocument`][crate::ImDocument]. + pub fn span(&self) -> Option> { + self.span.clone() + } + + pub(crate) fn despan(&mut self, input: &str) { + use indexmap::map::MutableKeys; + self.span = None; + self.decor.despan(input); + self.preamble.despan(input); + for (key, value) in self.items.iter_mut2() { + key.despan(input); + value.despan(input); + } + } +} + +impl InlineTable { + /// Returns an iterator over key/value pairs. + pub fn iter(&self) -> InlineTableIter<'_> { + Box::new( + self.items + .iter() + .filter(|(_, value)| !value.is_none()) + .map(|(key, value)| (key.get(), value.as_value().unwrap())), + ) + } + + /// Returns an iterator over key/value pairs. + pub fn iter_mut(&mut self) -> InlineTableIterMut<'_> { + use indexmap::map::MutableKeys; + Box::new( + self.items + .iter_mut2() + .filter(|(_, value)| value.is_value()) + .map(|(key, value)| (key.as_mut(), value.as_value_mut().unwrap())), + ) + } + + /// Returns the number of key/value pairs. + pub fn len(&self) -> usize { + self.iter().count() + } + + /// Returns true if the table is empty. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Clears the table, removing all key-value pairs. 
Keeps the allocated memory for reuse. + pub fn clear(&mut self) { + self.items.clear(); + } + + /// Gets the given key's corresponding entry in the Table for in-place manipulation. + pub fn entry(&'_ mut self, key: impl Into) -> InlineEntry<'_> { + match self.items.entry(key.into().into()) { + indexmap::map::Entry::Occupied(mut entry) => { + // Ensure it is a `Value` to simplify `InlineOccupiedEntry`'s code. + let scratch = std::mem::take(entry.get_mut()); + let scratch = Item::Value( + scratch + .into_value() + // HACK: `Item::None` is a corner case of a corner case, let's just pick a + // "safe" value + .unwrap_or_else(|_| Value::InlineTable(Default::default())), + ); + *entry.get_mut() = scratch; + + InlineEntry::Occupied(InlineOccupiedEntry { entry }) + } + indexmap::map::Entry::Vacant(entry) => InlineEntry::Vacant(InlineVacantEntry { entry }), + } + } + + /// Gets the given key's corresponding entry in the Table for in-place manipulation. + pub fn entry_format<'a>(&'a mut self, key: &Key) -> InlineEntry<'a> { + // Accept a `&Key` to be consistent with `entry` + match self.items.entry(key.clone()) { + indexmap::map::Entry::Occupied(mut entry) => { + // Ensure it is a `Value` to simplify `InlineOccupiedEntry`'s code. + let scratch = std::mem::take(entry.get_mut()); + let scratch = Item::Value( + scratch + .into_value() + // HACK: `Item::None` is a corner case of a corner case, let's just pick a + // "safe" value + .unwrap_or_else(|_| Value::InlineTable(Default::default())), + ); + *entry.get_mut() = scratch; + + InlineEntry::Occupied(InlineOccupiedEntry { entry }) + } + indexmap::map::Entry::Vacant(entry) => InlineEntry::Vacant(InlineVacantEntry { entry }), + } + } + /// Return an optional reference to the value at the given the key. + pub fn get(&self, key: &str) -> Option<&Value> { + self.items.get(key).and_then(|value| value.as_value()) + } + + /// Return an optional mutable reference to the value at the given the key. + pub fn get_mut(&mut self, key: &str) -> Option<&mut Value> { + self.items + .get_mut(key) + .and_then(|value| value.as_value_mut()) + } + + /// Return references to the key-value pair stored for key, if it is present, else None. + pub fn get_key_value<'a>(&'a self, key: &str) -> Option<(&'a Key, &'a Item)> { + self.items.get_full(key).and_then(|(_, key, value)| { + if !value.is_none() { + Some((key, value)) + } else { + None + } + }) + } + + /// Return mutable references to the key-value pair stored for key, if it is present, else None. + pub fn get_key_value_mut<'a>(&'a mut self, key: &str) -> Option<(KeyMut<'a>, &'a mut Item)> { + use indexmap::map::MutableKeys; + self.items.get_full_mut2(key).and_then(|(_, key, value)| { + if !value.is_none() { + Some((key.as_mut(), value)) + } else { + None + } + }) + } + + /// Returns true if the table contains given key. + pub fn contains_key(&self, key: &str) -> bool { + if let Some(value) = self.items.get(key) { + value.is_value() + } else { + false + } + } + + /// Inserts a key/value pair if the table does not contain the key. + /// Returns a mutable reference to the corresponding value. + pub fn get_or_insert>( + &mut self, + key: impl Into, + value: V, + ) -> &mut Value { + let key = key.into(); + self.items + .entry(Key::new(key)) + .or_insert(Item::Value(value.into())) + .as_value_mut() + .expect("non-value type in inline table") + } + + /// Inserts a key-value pair into the map. 
+ pub fn insert(&mut self, key: impl Into, value: Value) -> Option { + use indexmap::map::MutableEntryKey; + let key = Key::new(key); + let value = Item::Value(value); + match self.items.entry(key.clone()) { + indexmap::map::Entry::Occupied(mut entry) => { + entry.key_mut().fmt(); + let old = std::mem::replace(entry.get_mut(), value); + old.into_value().ok() + } + indexmap::map::Entry::Vacant(entry) => { + entry.insert(value); + None + } + } + } + + /// Inserts a key-value pair into the map. + pub fn insert_formatted(&mut self, key: &Key, value: Value) -> Option { + use indexmap::map::MutableEntryKey; + let value = Item::Value(value); + match self.items.entry(key.clone()) { + indexmap::map::Entry::Occupied(mut entry) => { + *entry.key_mut() = key.clone(); + let old = std::mem::replace(entry.get_mut(), value); + old.into_value().ok() + } + indexmap::map::Entry::Vacant(entry) => { + entry.insert(value); + None + } + } + } + + /// Removes an item given the key. + pub fn remove(&mut self, key: &str) -> Option { + self.items + .shift_remove(key) + .and_then(|value| value.into_value().ok()) + } + + /// Removes a key from the map, returning the stored key and value if the key was previously in the map. + pub fn remove_entry(&mut self, key: &str) -> Option<(Key, Value)> { + self.items + .shift_remove_entry(key) + .and_then(|(key, value)| Some((key, value.into_value().ok()?))) + } + + /// Retains only the elements specified by the `keep` predicate. + /// + /// In other words, remove all pairs `(key, value)` for which + /// `keep(&key, &mut value)` returns `false`. + /// + /// The elements are visited in iteration order. + pub fn retain(&mut self, mut keep: F) + where + F: FnMut(&str, &mut Value) -> bool, + { + self.items.retain(|key, item| { + item.as_value_mut() + .map(|value| keep(key, value)) + .unwrap_or(false) + }); + } +} + +#[cfg(feature = "display")] +impl std::fmt::Display for InlineTable { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + crate::encode::encode_table(self, f, None, ("", "")) + } +} + +impl, V: Into> Extend<(K, V)> for InlineTable { + fn extend>(&mut self, iter: T) { + for (key, value) in iter { + let key = key.into(); + let value = Item::Value(value.into()); + self.items.insert(key, value); + } + } +} + +impl, V: Into> FromIterator<(K, V)> for InlineTable { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let mut table = InlineTable::new(); + table.extend(iter); + table + } +} + +impl IntoIterator for InlineTable { + type Item = (InternalString, Value); + type IntoIter = InlineTableIntoIter; + + fn into_iter(self) -> Self::IntoIter { + Box::new( + self.items + .into_iter() + .filter(|(_, value)| value.is_value()) + .map(|(key, value)| (key.into(), value.into_value().unwrap())), + ) + } +} + +impl<'s> IntoIterator for &'s InlineTable { + type Item = (&'s str, &'s Value); + type IntoIter = InlineTableIter<'s>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +fn decorate_inline_table(table: &mut InlineTable) { + use indexmap::map::MutableKeys; + for (mut key, value) in table + .items + .iter_mut2() + .filter(|(_, value)| value.is_value()) + .map(|(key, value)| (key.as_mut(), value.as_value_mut().unwrap())) + { + key.leaf_decor_mut().clear(); + key.dotted_decor_mut().clear(); + value.decor_mut().clear(); + } +} + +/// An owned iterator type over key/value pairs of an inline table. +pub type InlineTableIntoIter = Box>; +/// An iterator type over key/value pairs of an inline table. 
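A sketch of the collection-style methods above under the same API assumptions: an `InlineTable` can be built from an iterator, filtered, and consumed.

```rust
use toml_edit::InlineTable;

fn main() {
    // `FromIterator`/`Extend` accept anything convertible into a `Key` and a `Value`.
    let mut t: InlineTable = [("x", 1i64), ("y", 2i64), ("z", 3i64)].into_iter().collect();
    t.retain(|key, _| key != "y");
    assert!(t.remove("z").is_some());
    assert_eq!(t.len(), 1);
    // Consuming iteration yields owned `(InternalString, Value)` pairs.
    let pairs: Vec<_> = t
        .into_iter()
        .map(|(k, v)| (k.to_string(), v.as_integer()))
        .collect();
    assert_eq!(pairs, vec![("x".to_string(), Some(1))]);
}
```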
+pub type InlineTableIter<'a> = Box + 'a>; +/// A mutable iterator type over key/value pairs of an inline table. +pub type InlineTableIterMut<'a> = Box, &'a mut Value)> + 'a>; + +impl TableLike for InlineTable { + fn iter(&self) -> Iter<'_> { + Box::new(self.items.iter().map(|(key, value)| (key.get(), value))) + } + fn iter_mut(&mut self) -> IterMut<'_> { + use indexmap::map::MutableKeys; + Box::new( + self.items + .iter_mut2() + .map(|(key, value)| (key.as_mut(), value)), + ) + } + fn clear(&mut self) { + self.clear(); + } + fn entry<'a>(&'a mut self, key: &str) -> crate::Entry<'a> { + // Accept a `&str` rather than an owned type to keep `InternalString`, well, internal + match self.items.entry(key.into()) { + indexmap::map::Entry::Occupied(entry) => { + crate::Entry::Occupied(crate::OccupiedEntry { entry }) + } + indexmap::map::Entry::Vacant(entry) => { + crate::Entry::Vacant(crate::VacantEntry { entry }) + } + } + } + fn entry_format<'a>(&'a mut self, key: &Key) -> crate::Entry<'a> { + // Accept a `&Key` to be consistent with `entry` + match self.items.entry(key.get().into()) { + indexmap::map::Entry::Occupied(entry) => { + crate::Entry::Occupied(crate::OccupiedEntry { entry }) + } + indexmap::map::Entry::Vacant(entry) => { + crate::Entry::Vacant(crate::VacantEntry { entry }) + } + } + } + fn get<'s>(&'s self, key: &str) -> Option<&'s Item> { + self.items.get(key) + } + fn get_mut<'s>(&'s mut self, key: &str) -> Option<&'s mut Item> { + self.items.get_mut(key) + } + fn get_key_value<'a>(&'a self, key: &str) -> Option<(&'a Key, &'a Item)> { + self.get_key_value(key) + } + fn get_key_value_mut<'a>(&'a mut self, key: &str) -> Option<(KeyMut<'a>, &'a mut Item)> { + self.get_key_value_mut(key) + } + fn contains_key(&self, key: &str) -> bool { + self.contains_key(key) + } + fn insert(&mut self, key: &str, value: Item) -> Option { + self.insert(key, value.into_value().unwrap()) + .map(Item::Value) + } + fn remove(&mut self, key: &str) -> Option { + self.remove(key).map(Item::Value) + } + + fn get_values(&self) -> Vec<(Vec<&Key>, &Value)> { + self.get_values() + } + fn fmt(&mut self) { + self.fmt(); + } + fn sort_values(&mut self) { + self.sort_values(); + } + fn set_dotted(&mut self, yes: bool) { + self.set_dotted(yes); + } + fn is_dotted(&self) -> bool { + self.is_dotted() + } + + fn key(&self, key: &str) -> Option<&'_ Key> { + self.key(key) + } + fn key_mut(&mut self, key: &str) -> Option> { + self.key_mut(key) + } + fn key_decor_mut(&mut self, key: &str) -> Option<&mut Decor> { + #![allow(deprecated)] + self.key_decor_mut(key) + } + fn key_decor(&self, key: &str) -> Option<&Decor> { + #![allow(deprecated)] + self.key_decor(key) + } +} + +// `{ key1 = value1, ... }` +pub(crate) const DEFAULT_INLINE_KEY_DECOR: (&str, &str) = (" ", " "); + +/// A view into a single location in a map, which may be vacant or occupied. +pub enum InlineEntry<'a> { + /// An occupied Entry. + Occupied(InlineOccupiedEntry<'a>), + /// A vacant Entry. + Vacant(InlineVacantEntry<'a>), +} + +impl<'a> InlineEntry<'a> { + /// Returns the entry key + /// + /// # Examples + /// + /// ``` + /// use toml_edit::Table; + /// + /// let mut map = Table::new(); + /// + /// assert_eq!("hello", map.entry("hello").key()); + /// ``` + pub fn key(&self) -> &str { + match self { + InlineEntry::Occupied(e) => e.key(), + InlineEntry::Vacant(e) => e.key(), + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// a mutable reference to the value in the entry. 
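Because `InlineTable` implements `TableLike` here, the same read-only code can serve both table shapes. A small sketch, again assuming the published 0.22 API:

```rust
use toml_edit::{DocumentMut, TableLike};

fn main() {
    let doc = "std = { version = '1.0' }\n\n[dev]\nversion = '2.0'\n"
        .parse::<DocumentMut>()
        .unwrap();
    // The inline table `std` and the standard table `[dev]` expose the same
    // lookups through `&dyn TableLike`.
    for name in ["std", "dev"] {
        let t: &dyn TableLike = doc[name].as_table_like().unwrap();
        assert!(t.contains_key("version"));
        assert!(t.get("version").and_then(|v| v.as_str()).is_some());
    }
}
```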
+ pub fn or_insert(self, default: Value) -> &'a mut Value { + match self { + InlineEntry::Occupied(entry) => entry.into_mut(), + InlineEntry::Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns a mutable reference to the value in the entry. + pub fn or_insert_with Value>(self, default: F) -> &'a mut Value { + match self { + InlineEntry::Occupied(entry) => entry.into_mut(), + InlineEntry::Vacant(entry) => entry.insert(default()), + } + } +} + +/// A view into a single occupied location in a `IndexMap`. +pub struct InlineOccupiedEntry<'a> { + entry: indexmap::map::OccupiedEntry<'a, Key, Item>, +} + +impl<'a> InlineOccupiedEntry<'a> { + /// Gets a reference to the entry key + /// + /// # Examples + /// + /// ``` + /// use toml_edit::Table; + /// + /// let mut map = Table::new(); + /// + /// assert_eq!("foo", map.entry("foo").key()); + /// ``` + pub fn key(&self) -> &str { + self.entry.key().get() + } + + /// Gets a mutable reference to the entry key + pub fn key_mut(&mut self) -> KeyMut<'_> { + use indexmap::map::MutableEntryKey; + self.entry.key_mut().as_mut() + } + + /// Gets a reference to the value in the entry. + pub fn get(&self) -> &Value { + self.entry.get().as_value().unwrap() + } + + /// Gets a mutable reference to the value in the entry. + pub fn get_mut(&mut self) -> &mut Value { + self.entry.get_mut().as_value_mut().unwrap() + } + + /// Converts the `OccupiedEntry` into a mutable reference to the value in the entry + /// with a lifetime bound to the map itself + pub fn into_mut(self) -> &'a mut Value { + self.entry.into_mut().as_value_mut().unwrap() + } + + /// Sets the value of the entry, and returns the entry's old value + pub fn insert(&mut self, value: Value) -> Value { + let value = Item::Value(value); + self.entry.insert(value).into_value().unwrap() + } + + /// Takes the value out of the entry, and returns it + pub fn remove(self) -> Value { + self.entry.shift_remove().into_value().unwrap() + } +} + +/// A view into a single empty location in a `IndexMap`. 
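A short sketch of driving the two entry variants directly, under the same API assumptions:

```rust
use toml_edit::{InlineEntry, InlineTable, Value};

fn main() {
    let mut t = InlineTable::new();
    t.insert("retries", Value::from(3i64));
    match t.entry("retries") {
        InlineEntry::Occupied(mut e) => {
            // Replace the stored value and inspect the old one.
            let old = e.insert(Value::from(5i64));
            assert_eq!(old.as_integer(), Some(3));
        }
        InlineEntry::Vacant(e) => {
            e.insert(Value::from(1i64));
        }
    }
    assert_eq!(t.get("retries").and_then(Value::as_integer), Some(5));
}
```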
+pub struct InlineVacantEntry<'a> { + entry: indexmap::map::VacantEntry<'a, Key, Item>, +} + +impl<'a> InlineVacantEntry<'a> { + /// Gets a reference to the entry key + /// + /// # Examples + /// + /// ``` + /// use toml_edit::Table; + /// + /// let mut map = Table::new(); + /// + /// assert_eq!("foo", map.entry("foo").key()); + /// ``` + pub fn key(&self) -> &str { + self.entry.key().get() + } + + /// Sets the value of the entry with the `VacantEntry`'s key, + /// and returns a mutable reference to it + pub fn insert(self, value: Value) -> &'a mut Value { + let entry = self.entry; + let value = Item::Value(value); + entry.insert(value).as_value_mut().unwrap() + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/internal_string.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/internal_string.rs new file mode 100644 index 000000000000..ca4c185fa84b --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/internal_string.rs @@ -0,0 +1,183 @@ +use std::borrow::Borrow; +use std::str::FromStr; + +/// Opaque string storage internal to `toml_edit` +#[derive(Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct InternalString(Inner); + +#[cfg(feature = "perf")] +type Inner = kstring::KString; +#[cfg(not(feature = "perf"))] +type Inner = String; + +impl InternalString { + /// Create an empty string + pub fn new() -> Self { + InternalString(Inner::new()) + } + + /// Access the underlying string + #[inline] + pub fn as_str(&self) -> &str { + self.0.as_str() + } +} + +impl std::fmt::Debug for InternalString { + #[inline] + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + self.0.fmt(formatter) + } +} + +impl std::ops::Deref for InternalString { + type Target = str; + + #[inline] + fn deref(&self) -> &str { + self.as_str() + } +} + +impl Borrow for InternalString { + #[inline] + fn borrow(&self) -> &str { + self.as_str() + } +} + +impl AsRef for InternalString { + #[inline] + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl From<&str> for InternalString { + #[inline] + fn from(s: &str) -> Self { + #[cfg(feature = "perf")] + let inner = kstring::KString::from_ref(s); + #[cfg(not(feature = "perf"))] + let inner = String::from(s); + + InternalString(inner) + } +} + +impl From for InternalString { + #[inline] + fn from(s: String) -> Self { + #[allow(clippy::useless_conversion)] // handle any string type + InternalString(s.into()) + } +} + +impl From<&String> for InternalString { + #[inline] + fn from(s: &String) -> Self { + InternalString(s.into()) + } +} + +impl From<&InternalString> for InternalString { + #[inline] + fn from(s: &InternalString) -> Self { + s.clone() + } +} + +impl From> for InternalString { + #[inline] + fn from(s: Box) -> Self { + InternalString(s.into()) + } +} + +impl FromStr for InternalString { + type Err = core::convert::Infallible; + #[inline] + fn from_str(s: &str) -> Result { + Ok(Self::from(s)) + } +} + +impl std::fmt::Display for InternalString { + #[inline] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.as_str().fmt(f) + } +} + +#[cfg(feature = "serde")] +impl serde::Serialize for InternalString { + #[inline] + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(self.as_str()) + } +} + +#[cfg(feature = "serde")] +impl<'de> serde::Deserialize<'de> for InternalString { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + 
{ + deserializer.deserialize_string(StringVisitor) + } +} + +#[cfg(feature = "serde")] +struct StringVisitor; + +#[cfg(feature = "serde")] +impl<'de> serde::de::Visitor<'de> for StringVisitor { + type Value = InternalString; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("a string") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + Ok(InternalString::from(v)) + } + + fn visit_string(self, v: String) -> Result + where + E: serde::de::Error, + { + Ok(InternalString::from(v)) + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: serde::de::Error, + { + match std::str::from_utf8(v) { + Ok(s) => Ok(InternalString::from(s)), + Err(_) => Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Bytes(v), + &self, + )), + } + } + + fn visit_byte_buf(self, v: Vec) -> Result + where + E: serde::de::Error, + { + match String::from_utf8(v) { + Ok(s) => Ok(InternalString::from(s)), + Err(e) => Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Bytes(&e.into_bytes()), + &self, + )), + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/item.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/item.rs new file mode 100644 index 000000000000..75bfcdc117c5 --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/item.rs @@ -0,0 +1,414 @@ +use std::str::FromStr; + +use toml_datetime::Datetime; + +use crate::array_of_tables::ArrayOfTables; +use crate::table::TableLike; +use crate::{Array, InlineTable, Table, Value}; + +/// Type representing either a value, a table, an array of tables, or none. +#[derive(Debug, Default)] +pub enum Item { + /// Type representing none. + #[default] + None, + /// Type representing value. + Value(Value), + /// Type representing table. + Table(Table), + /// Type representing array of tables. + ArrayOfTables(ArrayOfTables), +} + +impl Item { + /// Sets `self` to the given item if `self` is none and + /// returns a mutable reference to `self`. + pub fn or_insert(&mut self, item: Item) -> &mut Item { + if self.is_none() { + *self = item; + } + self + } +} + +// TODO: This should be generated by macro or derive +/// Downcasting +impl Item { + /// Text description of value type + pub fn type_name(&self) -> &'static str { + match self { + Item::None => "none", + Item::Value(v) => v.type_name(), + Item::Table(..) => "table", + Item::ArrayOfTables(..) => "array of tables", + } + } + + /// Index into a TOML array or map. A string index can be used to access a + /// value in a map, and a usize index can be used to access an element of an + /// array. + /// + /// Returns `None` if: + /// - The type of `self` does not match the type of the + /// index, for example if the index is a string and `self` is an array or a + /// number. + /// - The given key does not exist in the map + /// or the given index is not within the bounds of the array. + pub fn get(&self, index: I) -> Option<&Item> { + index.index(self) + } + + /// Mutably index into a TOML array or map. A string index can be used to + /// access a value in a map, and a usize index can be used to access an + /// element of an array. + /// + /// Returns `None` if: + /// - The type of `self` does not match the type of the + /// index, for example if the index is a string and `self` is an array or a + /// number. + /// - The given key does not exist in the map + /// or the given index is not within the bounds of the array. 
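To illustrate the index-based navigation documented above (assuming the published 0.22 API): `Item::get` accepts either a string or a position and returns `None` on a type mismatch instead of panicking.

```rust
use toml_edit::{DocumentMut, Item};

fn main() {
    let doc = "[package]\nname = 'demo'\nauthors = ['a', 'b']\n"
        .parse::<DocumentMut>()
        .unwrap();
    let pkg: &Item = &doc["package"];
    assert_eq!(pkg.get("name").and_then(Item::as_str), Some("demo"));
    assert_eq!(
        pkg.get("authors").and_then(|a| a.get(1)).and_then(Item::as_str),
        Some("b")
    );
    // A table is not indexable by position, so this is `None` rather than a panic.
    assert!(pkg.get(0).is_none());
}
```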
+ pub fn get_mut(&mut self, index: I) -> Option<&mut Item> { + index.index_mut(self) + } + + /// Casts `self` to value. + pub fn as_value(&self) -> Option<&Value> { + match *self { + Item::Value(ref v) => Some(v), + _ => None, + } + } + /// Casts `self` to table. + pub fn as_table(&self) -> Option<&Table> { + match *self { + Item::Table(ref t) => Some(t), + _ => None, + } + } + /// Casts `self` to array of tables. + pub fn as_array_of_tables(&self) -> Option<&ArrayOfTables> { + match *self { + Item::ArrayOfTables(ref a) => Some(a), + _ => None, + } + } + /// Casts `self` to mutable value. + pub fn as_value_mut(&mut self) -> Option<&mut Value> { + match *self { + Item::Value(ref mut v) => Some(v), + _ => None, + } + } + /// Casts `self` to mutable table. + pub fn as_table_mut(&mut self) -> Option<&mut Table> { + match *self { + Item::Table(ref mut t) => Some(t), + _ => None, + } + } + /// Casts `self` to mutable array of tables. + pub fn as_array_of_tables_mut(&mut self) -> Option<&mut ArrayOfTables> { + match *self { + Item::ArrayOfTables(ref mut a) => Some(a), + _ => None, + } + } + /// Casts `self` to value. + pub fn into_value(self) -> Result { + match self { + Item::None => Err(self), + Item::Value(v) => Ok(v), + Item::Table(v) => { + let v = v.into_inline_table(); + Ok(Value::InlineTable(v)) + } + Item::ArrayOfTables(v) => { + let v = v.into_array(); + Ok(Value::Array(v)) + } + } + } + /// In-place convert to a value + pub fn make_value(&mut self) { + let other = std::mem::take(self); + let other = other.into_value().map(Item::Value).unwrap_or(Item::None); + *self = other; + } + /// Casts `self` to table. + pub fn into_table(self) -> Result { + match self { + Item::Table(t) => Ok(t), + Item::Value(Value::InlineTable(t)) => Ok(t.into_table()), + _ => Err(self), + } + } + /// Casts `self` to array of tables. + pub fn into_array_of_tables(self) -> Result { + match self { + Item::ArrayOfTables(a) => Ok(a), + Item::Value(Value::Array(a)) => { + if a.is_empty() { + Err(Item::Value(Value::Array(a))) + } else if a.iter().all(|v| v.is_inline_table()) { + let mut aot = ArrayOfTables::new(); + aot.values = a.values; + for value in aot.values.iter_mut() { + value.make_item(); + } + Ok(aot) + } else { + Err(Item::Value(Value::Array(a))) + } + } + _ => Err(self), + } + } + // Starting private because the name is unclear + pub(crate) fn make_item(&mut self) { + let other = std::mem::take(self); + let other = match other.into_table().map(Item::Table) { + Ok(i) => i, + Err(i) => i, + }; + let other = match other.into_array_of_tables().map(Item::ArrayOfTables) { + Ok(i) => i, + Err(i) => i, + }; + *self = other; + } + /// Returns true if `self` is a value. + pub fn is_value(&self) -> bool { + self.as_value().is_some() + } + /// Returns true if `self` is a table. + pub fn is_table(&self) -> bool { + self.as_table().is_some() + } + /// Returns true if `self` is an array of tables. + pub fn is_array_of_tables(&self) -> bool { + self.as_array_of_tables().is_some() + } + /// Returns true if `self` is `None`. + pub fn is_none(&self) -> bool { + matches!(*self, Item::None) + } + + // Duplicate Value downcasting API + + /// Casts `self` to integer. + pub fn as_integer(&self) -> Option { + self.as_value().and_then(Value::as_integer) + } + + /// Returns true if `self` is an integer. + pub fn is_integer(&self) -> bool { + self.as_integer().is_some() + } + + /// Casts `self` to float. + pub fn as_float(&self) -> Option { + self.as_value().and_then(Value::as_float) + } + + /// Returns true if `self` is a float. 
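A sketch of the conversion helpers above, promoting an inline table to a standard `Table` (same API assumptions as the earlier examples):

```rust
use toml_edit::{DocumentMut, Item};

fn main() {
    let mut doc = "server = { host = 'localhost', port = 8080 }\n"
        .parse::<DocumentMut>()
        .unwrap();
    // `into_table` converts the inline table, and the values it holds, into a
    // standard `Table`; the failure case hands the original `Item` back.
    let item = std::mem::take(&mut doc["server"]);
    let table = item.into_table().expect("server is table-like");
    doc["server"] = Item::Table(table);
    assert!(doc["server"].is_table());
    assert_eq!(doc["server"]["port"].as_integer(), Some(8080));
}
```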
+ pub fn is_float(&self) -> bool { + self.as_float().is_some() + } + + /// Casts `self` to boolean. + pub fn as_bool(&self) -> Option { + self.as_value().and_then(Value::as_bool) + } + + /// Returns true if `self` is a boolean. + pub fn is_bool(&self) -> bool { + self.as_bool().is_some() + } + + /// Casts `self` to str. + pub fn as_str(&self) -> Option<&str> { + self.as_value().and_then(Value::as_str) + } + + /// Returns true if `self` is a string. + pub fn is_str(&self) -> bool { + self.as_str().is_some() + } + + /// Casts `self` to date-time. + pub fn as_datetime(&self) -> Option<&Datetime> { + self.as_value().and_then(Value::as_datetime) + } + + /// Returns true if `self` is a date-time. + pub fn is_datetime(&self) -> bool { + self.as_datetime().is_some() + } + + /// Casts `self` to array. + pub fn as_array(&self) -> Option<&Array> { + self.as_value().and_then(Value::as_array) + } + + /// Casts `self` to mutable array. + pub fn as_array_mut(&mut self) -> Option<&mut Array> { + self.as_value_mut().and_then(Value::as_array_mut) + } + + /// Returns true if `self` is an array. + pub fn is_array(&self) -> bool { + self.as_array().is_some() + } + + /// Casts `self` to inline table. + pub fn as_inline_table(&self) -> Option<&InlineTable> { + self.as_value().and_then(Value::as_inline_table) + } + + /// Casts `self` to mutable inline table. + pub fn as_inline_table_mut(&mut self) -> Option<&mut InlineTable> { + self.as_value_mut().and_then(Value::as_inline_table_mut) + } + + /// Returns true if `self` is an inline table. + pub fn is_inline_table(&self) -> bool { + self.as_inline_table().is_some() + } + + /// Casts `self` to either a table or an inline table. + pub fn as_table_like(&self) -> Option<&dyn TableLike> { + self.as_table() + .map(|t| t as &dyn TableLike) + .or_else(|| self.as_inline_table().map(|t| t as &dyn TableLike)) + } + + /// Casts `self` to either a table or an inline table. + pub fn as_table_like_mut(&mut self) -> Option<&mut dyn TableLike> { + match self { + Item::Table(t) => Some(t as &mut dyn TableLike), + Item::Value(Value::InlineTable(t)) => Some(t as &mut dyn TableLike), + _ => None, + } + } + + /// Returns true if `self` is either a table, or an inline table. + pub fn is_table_like(&self) -> bool { + self.as_table_like().is_some() + } + + /// The location within the original document + /// + /// This generally requires an [`ImDocument`][crate::ImDocument]. 
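The duplicated `Value` accessors make edits like the following possible without matching on `Item` explicitly; a sketch under the same assumptions:

```rust
use toml_edit::DocumentMut;

fn main() {
    let mut doc = "features = ['std']\n".parse::<DocumentMut>().unwrap();
    // `as_array_mut` succeeds only when the item really is an array value.
    if let Some(features) = doc["features"].as_array_mut() {
        features.push("serde");
    }
    assert_eq!(doc["features"].as_array().map(|a| a.len()), Some(2));
    assert!(doc["features"].as_bool().is_none());
}
```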
+ pub fn span(&self) -> Option> { + match self { + Item::None => None, + Item::Value(v) => v.span(), + Item::Table(v) => v.span(), + Item::ArrayOfTables(v) => v.span(), + } + } + + pub(crate) fn despan(&mut self, input: &str) { + match self { + Item::None => {} + Item::Value(v) => v.despan(input), + Item::Table(v) => v.despan(input), + Item::ArrayOfTables(v) => v.despan(input), + } + } +} + +impl Clone for Item { + #[inline(never)] + fn clone(&self) -> Self { + match self { + Item::None => Item::None, + Item::Value(v) => Item::Value(v.clone()), + Item::Table(v) => Item::Table(v.clone()), + Item::ArrayOfTables(v) => Item::ArrayOfTables(v.clone()), + } + } +} + +#[cfg(feature = "parse")] +impl FromStr for Item { + type Err = crate::TomlError; + + /// Parses a value from a &str + fn from_str(s: &str) -> Result { + let value = s.parse::()?; + Ok(Item::Value(value)) + } +} + +impl<'b> From<&'b Item> for Item { + fn from(s: &'b Item) -> Self { + s.clone() + } +} + +impl> From for Item { + fn from(s: V) -> Self { + Item::Value(s.into()) + } +} + +#[cfg(feature = "display")] +impl std::fmt::Display for Item { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &self { + Item::None => Ok(()), + Item::Value(v) => v.fmt(f), + Item::Table(v) => v.fmt(f), + Item::ArrayOfTables(v) => v.fmt(f), + } + } +} + +/// Returns a formatted value. +/// +/// Since formatting is part of a `Value`, the right hand side of the +/// assignment needs to be decorated with a space before the value. +/// The `value` function does just that. +/// +/// # Examples +/// ```rust +/// # #[cfg(feature = "display")] { +/// # #[cfg(feature = "parse")] { +/// # use toml_edit::*; +/// let mut table = Table::default(); +/// let mut array = Array::default(); +/// array.push("hello"); +/// array.push("\\, world"); // \ is only allowed in a literal string +/// table["key1"] = value("value1"); +/// table["key2"] = value(42); +/// table["key3"] = value(array); +/// assert_eq!(table.to_string(), +/// r#"key1 = "value1" +/// key2 = 42 +/// key3 = ["hello", '\, world'] +/// "#); +/// # } +/// # } +/// ``` +pub fn value>(v: V) -> Item { + Item::Value(v.into()) +} + +/// Returns an empty table. +pub fn table() -> Item { + Item::Table(Table::new()) +} + +/// Returns an empty array of tables. +pub fn array() -> Item { + Item::ArrayOfTables(ArrayOfTables::new()) +} + +#[test] +#[cfg(feature = "parse")] +#[cfg(feature = "display")] +fn string_roundtrip() { + value("hello").to_string().parse::().unwrap(); +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/key.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/key.rs new file mode 100644 index 000000000000..314870f5267f --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/key.rs @@ -0,0 +1,449 @@ +use std::borrow::Cow; +use std::str::FromStr; + +use crate::repr::{Decor, Repr}; +use crate::InternalString; + +/// Key as part of a Key/Value Pair or a table header. +/// +/// # Examples +/// +/// ```notrust +/// [dependencies."nom"] +/// version = "5.0" +/// 'literal key' = "nonsense" +/// "basic string key" = 42 +/// ``` +/// +/// There are 3 types of keys: +/// +/// 1. Bare keys (`version` and `dependencies`) +/// +/// 2. Basic quoted keys (`"basic string key"` and `"nom"`) +/// +/// 3. Literal quoted keys (`'literal key'`) +/// +/// For details see [toml spec](https://github.com/toml-lang/toml/#keyvalue-pair). +/// +/// To parse a key use `FromStr` trait implementation: `"string".parse::()`. 
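The three key forms described above all parse to their unquoted value, and `Key::parse` additionally splits dotted paths. A sketch assuming the default `parse` feature:

```rust
use toml_edit::Key;

fn main() {
    let bare: Key = "version".parse().unwrap();
    let basic: Key = "\"basic string key\"".parse().unwrap();
    let literal: Key = "'literal key'".parse().unwrap();
    assert_eq!(bare.get(), "version");
    assert_eq!(basic.get(), "basic string key");
    assert_eq!(literal.get(), "literal key");

    // Dotted keys come back as one `Key` per segment.
    let path = Key::parse("dependencies.nom").unwrap();
    assert_eq!(path.len(), 2);
    assert_eq!(path[1].get(), "nom");
}
```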
+#[derive(Debug)] +pub struct Key { + key: InternalString, + pub(crate) repr: Option, + pub(crate) leaf_decor: Decor, + pub(crate) dotted_decor: Decor, +} + +impl Key { + /// Create a new table key + pub fn new(key: impl Into) -> Self { + Self { + key: key.into(), + repr: None, + leaf_decor: Default::default(), + dotted_decor: Default::default(), + } + } + + /// Parse a TOML key expression + /// + /// Unlike `"".parse()`, this supports dotted keys. + #[cfg(feature = "parse")] + pub fn parse(repr: &str) -> Result, crate::TomlError> { + Self::try_parse_path(repr) + } + + pub(crate) fn with_repr_unchecked(mut self, repr: Repr) -> Self { + self.repr = Some(repr); + self + } + + /// While creating the `Key`, add `Decor` to it + #[deprecated(since = "0.21.1", note = "Replaced with `with_leaf_decor`")] + pub fn with_decor(self, decor: Decor) -> Self { + self.with_leaf_decor(decor) + } + + /// While creating the `Key`, add `Decor` to it for the line entry + pub fn with_leaf_decor(mut self, decor: Decor) -> Self { + self.leaf_decor = decor; + self + } + + /// While creating the `Key`, add `Decor` to it for between dots + pub fn with_dotted_decor(mut self, decor: Decor) -> Self { + self.dotted_decor = decor; + self + } + + /// Access a mutable proxy for the `Key`. + pub fn as_mut(&mut self) -> KeyMut<'_> { + KeyMut { key: self } + } + + /// Returns the parsed key value. + pub fn get(&self) -> &str { + &self.key + } + + /// Returns key raw representation, if available. + pub fn as_repr(&self) -> Option<&Repr> { + self.repr.as_ref() + } + + /// Returns the default raw representation. + #[cfg(feature = "display")] + pub fn default_repr(&self) -> Repr { + to_key_repr(&self.key) + } + + /// Returns a raw representation. + #[cfg(feature = "display")] + pub fn display_repr(&self) -> Cow<'_, str> { + self.as_repr() + .and_then(|r| r.as_raw().as_str()) + .map(Cow::Borrowed) + .unwrap_or_else(|| { + Cow::Owned(self.default_repr().as_raw().as_str().unwrap().to_owned()) + }) + } + + /// Returns the surrounding whitespace + #[deprecated( + since = "0.21.1", + note = "Replaced with `dotted_decor_mut`, `leaf_decor_mut" + )] + pub fn decor_mut(&mut self) -> &mut Decor { + self.leaf_decor_mut() + } + + /// Returns the surrounding whitespace for the line entry + pub fn leaf_decor_mut(&mut self) -> &mut Decor { + &mut self.leaf_decor + } + + /// Returns the surrounding whitespace for between dots + pub fn dotted_decor_mut(&mut self) -> &mut Decor { + &mut self.dotted_decor + } + + /// Returns the surrounding whitespace + #[deprecated(since = "0.21.1", note = "Replaced with `dotted_decor`, `leaf_decor")] + pub fn decor(&self) -> &Decor { + self.leaf_decor() + } + + /// Returns the surrounding whitespace for the line entry + pub fn leaf_decor(&self) -> &Decor { + &self.leaf_decor + } + + /// Returns the surrounding whitespace for between dots + pub fn dotted_decor(&self) -> &Decor { + &self.dotted_decor + } + + /// The location within the original document + /// + /// This generally requires an [`ImDocument`][crate::ImDocument]. + pub fn span(&self) -> Option> { + self.repr.as_ref().and_then(|r| r.span()) + } + + pub(crate) fn despan(&mut self, input: &str) { + self.leaf_decor.despan(input); + self.dotted_decor.despan(input); + if let Some(repr) = &mut self.repr { + repr.despan(input); + } + } + + /// Auto formats the key. 
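A small sketch of the repr bookkeeping above: a parsed key keeps its raw text, while a programmatically built key has none until it is rendered (same feature assumptions).

```rust
use toml_edit::Key;

fn main() {
    let parsed: Key = "'literal key'".parse().unwrap();
    assert_eq!(parsed.get(), "literal key");
    // The raw representation keeps the original quoting.
    assert_eq!(
        parsed.as_repr().and_then(|r| r.as_raw().as_str()),
        Some("'literal key'")
    );

    // A key built with `Key::new` carries no repr; one is generated on display.
    let built = Key::new("literal key");
    assert!(built.as_repr().is_none());
}
```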
+ pub fn fmt(&mut self) { + self.repr = None; + self.leaf_decor.clear(); + self.dotted_decor.clear(); + } + + #[cfg(feature = "parse")] + fn try_parse_simple(s: &str) -> Result { + let mut key = crate::parser::parse_key(s)?; + key.despan(s); + Ok(key) + } + + #[cfg(feature = "parse")] + fn try_parse_path(s: &str) -> Result, crate::TomlError> { + let mut keys = crate::parser::parse_key_path(s)?; + for key in &mut keys { + key.despan(s); + } + Ok(keys) + } +} + +impl Clone for Key { + #[inline(never)] + fn clone(&self) -> Self { + Self { + key: self.key.clone(), + repr: self.repr.clone(), + leaf_decor: self.leaf_decor.clone(), + dotted_decor: self.dotted_decor.clone(), + } + } +} + +impl std::ops::Deref for Key { + type Target = str; + + fn deref(&self) -> &Self::Target { + self.get() + } +} + +impl std::borrow::Borrow for Key { + #[inline] + fn borrow(&self) -> &str { + self.get() + } +} + +impl std::hash::Hash for Key { + fn hash(&self, state: &mut H) { + self.get().hash(state); + } +} + +impl Ord for Key { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.get().cmp(other.get()) + } +} + +impl PartialOrd for Key { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Eq for Key {} + +impl PartialEq for Key { + #[inline] + fn eq(&self, other: &Key) -> bool { + PartialEq::eq(self.get(), other.get()) + } +} + +impl PartialEq for Key { + #[inline] + fn eq(&self, other: &str) -> bool { + PartialEq::eq(self.get(), other) + } +} + +impl<'s> PartialEq<&'s str> for Key { + #[inline] + fn eq(&self, other: &&str) -> bool { + PartialEq::eq(self.get(), *other) + } +} + +impl PartialEq for Key { + #[inline] + fn eq(&self, other: &String) -> bool { + PartialEq::eq(self.get(), other.as_str()) + } +} + +#[cfg(feature = "display")] +impl std::fmt::Display for Key { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + crate::encode::encode_key(self, f, None) + } +} + +#[cfg(feature = "parse")] +impl FromStr for Key { + type Err = crate::TomlError; + + /// Tries to parse a key from a &str, + /// if fails, tries as basic quoted key (surrounds with "") + /// and then literal quoted key (surrounds with '') + fn from_str(s: &str) -> Result { + Key::try_parse_simple(s) + } +} + +#[cfg(feature = "display")] +fn to_key_repr(key: &str) -> Repr { + #[cfg(feature = "parse")] + { + if key + .as_bytes() + .iter() + .copied() + .all(crate::parser::key::is_unquoted_char) + && !key.is_empty() + { + Repr::new_unchecked(key) + } else { + crate::encode::to_string_repr( + key, + Some(crate::encode::StringStyle::OnelineSingle), + None, + ) + } + } + #[cfg(not(feature = "parse"))] + { + crate::encode::to_string_repr(key, Some(crate::encode::StringStyle::OnelineSingle), None) + } +} + +impl<'b> From<&'b str> for Key { + fn from(s: &'b str) -> Self { + Key::new(s) + } +} + +impl<'b> From<&'b String> for Key { + fn from(s: &'b String) -> Self { + Key::new(s) + } +} + +impl From for Key { + fn from(s: String) -> Self { + Key::new(s) + } +} + +impl From for Key { + fn from(s: InternalString) -> Self { + Key::new(s) + } +} + +#[doc(hidden)] +impl From for InternalString { + fn from(key: Key) -> InternalString { + key.key + } +} + +/// A mutable reference to a `Key`'s formatting +#[derive(Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] +pub struct KeyMut<'k> { + key: &'k mut Key, +} + +impl<'k> KeyMut<'k> { + /// Returns the parsed key value. + pub fn get(&self) -> &str { + self.key.get() + } + + /// Returns the raw representation, if available. 
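Because the trait implementations above compare by the parsed value, quoting style does not affect equality, hashing, or ordering. A brief sketch:

```rust
use toml_edit::Key;

fn main() {
    let quoted: Key = "\"serde\"".parse().unwrap();
    let bare = Key::from("serde");
    assert_eq!(quoted, bare);
    assert_eq!(quoted, "serde");

    let mut keys = vec![Key::new("b"), Key::new("a")];
    keys.sort();
    assert_eq!(keys[0].get(), "a");
}
```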
+ pub fn as_repr(&self) -> Option<&Repr> { + self.key.as_repr() + } + + /// Returns the default raw representation. + #[cfg(feature = "display")] + pub fn default_repr(&self) -> Repr { + self.key.default_repr() + } + + /// Returns a raw representation. + #[cfg(feature = "display")] + pub fn display_repr(&self) -> Cow<'_, str> { + self.key.display_repr() + } + + /// Returns the surrounding whitespace + #[deprecated( + since = "0.21.1", + note = "Replaced with `dotted_decor_mut`, `leaf_decor_mut" + )] + pub fn decor_mut(&mut self) -> &mut Decor { + #![allow(deprecated)] + self.key.decor_mut() + } + + /// Returns the surrounding whitespace for the line entry + pub fn leaf_decor_mut(&mut self) -> &mut Decor { + self.key.leaf_decor_mut() + } + + /// Returns the surrounding whitespace for between dots + pub fn dotted_decor_mut(&mut self) -> &mut Decor { + self.key.dotted_decor_mut() + } + + /// Returns the surrounding whitespace + #[deprecated(since = "0.21.1", note = "Replaced with `dotted_decor`, `leaf_decor")] + pub fn decor(&self) -> &Decor { + #![allow(deprecated)] + self.key.decor() + } + + /// Returns the surrounding whitespace for the line entry + pub fn leaf_decor(&self) -> &Decor { + self.key.leaf_decor() + } + + /// Returns the surrounding whitespace for between dots + pub fn dotted_decor(&self) -> &Decor { + self.key.dotted_decor() + } + + /// Auto formats the key. + pub fn fmt(&mut self) { + self.key.fmt(); + } +} + +impl<'k> std::ops::Deref for KeyMut<'k> { + type Target = str; + + fn deref(&self) -> &Self::Target { + self.get() + } +} + +impl<'s> PartialEq for KeyMut<'s> { + #[inline] + fn eq(&self, other: &str) -> bool { + PartialEq::eq(self.get(), other) + } +} + +impl<'s> PartialEq<&'s str> for KeyMut<'s> { + #[inline] + fn eq(&self, other: &&str) -> bool { + PartialEq::eq(self.get(), *other) + } +} + +impl<'s> PartialEq for KeyMut<'s> { + #[inline] + fn eq(&self, other: &String) -> bool { + PartialEq::eq(self.get(), other.as_str()) + } +} + +#[cfg(feature = "display")] +impl<'k> std::fmt::Display for KeyMut<'k> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(&self.key, f) + } +} + +#[test] +#[cfg(feature = "parse")] +#[cfg(feature = "display")] +fn string_roundtrip() { + Key::new("hello").to_string().parse::().unwrap(); +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/lib.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/lib.rs new file mode 100644 index 000000000000..c47b902a53cb --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/lib.rs @@ -0,0 +1,143 @@ +//! # `toml_edit` +//! +//! This crate allows you to parse and modify toml +//! documents, while preserving comments, spaces *and +//! relative order* or items. +//! +//! If you also need the ease of a more traditional API, see the [`toml`] crate. +//! +//! # Example +//! +//! ```rust +//! # #[cfg(feature = "parse")] { +//! # #[cfg(feature = "display")] { +//! use toml_edit::{DocumentMut, value}; +//! +//! let toml = r#" +//! "hello" = 'toml!' # comment +//! ['a'.b] +//! "#; +//! let mut doc = toml.parse::().expect("invalid doc"); +//! assert_eq!(doc.to_string(), toml); +//! // let's add a new key/value pair inside a.b: c = {d = "hello"} +//! doc["a"]["b"]["c"]["d"] = value("hello"); +//! // autoformat inline table a.b.c: { d = "hello" } +//! doc["a"]["b"]["c"].as_inline_table_mut().map(|t| t.fmt()); +//! let expected = r#" +//! "hello" = 'toml!' # comment +//! ['a'.b] +//! 
c = { d = "hello" } +//! "#; +//! assert_eq!(doc.to_string(), expected); +//! # } +//! # } +//! ``` +//! +//! ## Controlling formatting +//! +//! By default, values are created with default formatting +//! ```rust +//! # #[cfg(feature = "display")] { +//! # #[cfg(feature = "parse")] { +//! let mut doc = toml_edit::DocumentMut::new(); +//! doc["foo"] = toml_edit::value("bar"); +//! let expected = r#"foo = "bar" +//! "#; +//! assert_eq!(doc.to_string(), expected); +//! # } +//! # } +//! ``` +//! +//! You can choose a custom TOML representation by parsing the value. +//! ```rust +//! # #[cfg(feature = "display")] { +//! # #[cfg(feature = "parse")] { +//! let mut doc = toml_edit::DocumentMut::new(); +//! doc["foo"] = "'bar'".parse::().unwrap(); +//! let expected = r#"foo = 'bar' +//! "#; +//! assert_eq!(doc.to_string(), expected); +//! # } +//! # } +//! ``` +//! +//! ## Limitations +//! +//! Things it does not preserve: +//! +//! * Order of dotted keys, see [issue](https://github.com/toml-rs/toml/issues/163). +//! +//! [`toml`]: https://docs.rs/toml/latest/toml/ + +// https://github.com/Marwes/combine/issues/172 +#![recursion_limit = "256"] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![warn(missing_docs)] +#![warn(clippy::print_stderr)] +#![warn(clippy::print_stdout)] + +mod array; +mod array_of_tables; +mod document; +#[cfg(feature = "display")] +mod encode; +mod error; +mod index; +mod inline_table; +mod internal_string; +mod item; +mod key; +#[cfg(feature = "parse")] +mod parser; +mod raw_string; +mod repr; +mod table; +mod value; + +#[cfg(feature = "serde")] +pub mod de; +#[cfg(feature = "serde")] +pub mod ser; + +pub mod visit; +pub mod visit_mut; + +pub use crate::array::{Array, ArrayIntoIter, ArrayIter, ArrayIterMut}; +pub use crate::array_of_tables::{ + ArrayOfTables, ArrayOfTablesIntoIter, ArrayOfTablesIter, ArrayOfTablesIterMut, +}; +/// Deprecated, replaced with [`DocumentMut`] +#[deprecated(since = "0.22.6", note = "Replaced with `DocumentMut`")] +pub type Document = DocumentMut; +pub use crate::document::DocumentMut; +pub use crate::document::ImDocument; +pub use crate::error::TomlError; +pub use crate::inline_table::{ + InlineEntry, InlineOccupiedEntry, InlineTable, InlineTableIntoIter, InlineTableIter, + InlineTableIterMut, InlineVacantEntry, +}; +pub use crate::internal_string::InternalString; +pub use crate::item::{array, table, value, Item}; +pub use crate::key::{Key, KeyMut}; +pub use crate::raw_string::RawString; +pub use crate::repr::{Decor, Formatted, Repr}; +pub use crate::table::{ + Entry, IntoIter, Iter, IterMut, OccupiedEntry, Table, TableLike, VacantEntry, +}; +pub use crate::value::Value; +pub use toml_datetime::*; + +// Prevent users from some traits. 
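One export worth a sketch is `ImDocument`, the read-only counterpart to `DocumentMut`: parsing through it keeps source spans available, which `DocumentMut` discards. This assumes the published 0.22 API and is not part of the vendored patch.

```rust
use toml_edit::ImDocument;

fn main() {
    let src = "[package]\nname = 'demo'\n";
    let doc = ImDocument::parse(src).unwrap();
    let name = doc
        .get("package")
        .and_then(|p| p.get("name"))
        .and_then(|n| n.as_value())
        .unwrap();
    // Spans index back into the original input.
    let span = name.span().expect("ImDocument keeps spans");
    assert_eq!(&src[span], "'demo'");
}
```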
+pub(crate) mod private { + pub trait Sealed {} + impl Sealed for usize {} + impl Sealed for str {} + impl Sealed for String {} + impl Sealed for i64 {} + impl Sealed for f64 {} + impl Sealed for bool {} + impl Sealed for crate::Datetime {} + impl<'a, T: ?Sized> Sealed for &'a T where T: Sealed {} + impl Sealed for crate::Table {} + impl Sealed for crate::InlineTable {} +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/parser/array.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/parser/array.rs new file mode 100644 index 000000000000..d246c63d93ec --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/parser/array.rs @@ -0,0 +1,134 @@ +use winnow::combinator::cut_err; +use winnow::combinator::delimited; +use winnow::combinator::opt; +use winnow::combinator::peek; +use winnow::combinator::separated; +use winnow::combinator::trace; + +use crate::parser::trivia::ws_comment_newline; +use crate::parser::value::value; +use crate::{Array, Item, RawString}; + +use crate::parser::prelude::*; + +// ;; Array + +// array = array-open array-values array-close +pub(crate) fn array<'i>(input: &mut Input<'i>) -> PResult { + trace("array", move |input: &mut Input<'i>| { + delimited( + ARRAY_OPEN, + cut_err(array_values), + cut_err(ARRAY_CLOSE) + .context(StrContext::Label("array")) + .context(StrContext::Expected(StrContextValue::CharLiteral(']'))), + ) + .parse_next(input) + }) + .parse_next(input) +} + +// note: we're omitting ws and newlines here, because +// they should be part of the formatted values +// array-open = %x5B ws-newline ; [ +pub(crate) const ARRAY_OPEN: u8 = b'['; +// array-close = ws-newline %x5D ; ] +const ARRAY_CLOSE: u8 = b']'; +// array-sep = ws %x2C ws ; , Comma +const ARRAY_SEP: u8 = b','; + +// array-values = ws-comment-newline val ws-comment-newline array-sep array-values +// array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] +pub(crate) fn array_values(input: &mut Input<'_>) -> PResult { + if peek(opt(ARRAY_CLOSE)).parse_next(input)?.is_some() { + // Optimize for empty arrays, avoiding `value` from being expected to fail + return Ok(Array::new()); + } + + let array = separated(0.., array_value, ARRAY_SEP).parse_next(input)?; + let mut array = Array::with_vec(array); + if !array.is_empty() { + let comma = opt(ARRAY_SEP).parse_next(input)?.is_some(); + array.set_trailing_comma(comma); + } + let trailing = ws_comment_newline.span().parse_next(input)?; + array.set_trailing(RawString::with_span(trailing)); + + Ok(array) +} + +pub(crate) fn array_value(input: &mut Input<'_>) -> PResult { + let prefix = ws_comment_newline.span().parse_next(input)?; + let value = value.parse_next(input)?; + let suffix = ws_comment_newline.span().parse_next(input)?; + let value = value.decorated(RawString::with_span(prefix), RawString::with_span(suffix)); + let value = Item::Value(value); + Ok(value) +} + +#[cfg(test)] +#[cfg(feature = "parse")] +#[cfg(feature = "display")] +mod test { + use super::*; + + #[test] + fn arrays() { + let inputs = [ + r#"[]"#, + r#"[ ]"#, + r#"[ + 1, 2, 3 +]"#, + r#"[ + 1, + 2, # this is ok +]"#, + r#"[# comment +# comment2 + + + ]"#, + r#"[# comment +# comment2 + 1 + +#sd +, +# comment3 + + ]"#, + r#"[1]"#, + r#"[1,]"#, + r#"[ "all", 'strings', """are the same""", '''type''']"#, + r#"[ 100, -2,]"#, + r#"[1, 2, 3]"#, + r#"[1.1, 2.1, 3.1]"#, + r#"["a", "b", "c"]"#, + r#"[ [ 1, 2 ], [3, 4, 5] ]"#, + r#"[ [ 1, 2 ], ["a", "b", "c"] ]"#, + r#"[ { x = 1, a = "2" }, {a = "a",b = 
"b", c = "c"} ]"#, + ]; + for input in inputs { + dbg!(input); + let mut parsed = array.parse(new_input(input)); + if let Ok(parsed) = &mut parsed { + parsed.despan(input); + } + assert_eq!(parsed.map(|a| a.to_string()), Ok(input.to_owned())); + } + } + + #[test] + fn invalid_arrays() { + let invalid_inputs = [r#"["#, r#"[,]"#, r#"[,2]"#, r#"[1e165,,]"#]; + for input in invalid_inputs { + dbg!(input); + let mut parsed = array.parse(new_input(input)); + if let Ok(parsed) = &mut parsed { + parsed.despan(input); + } + assert!(parsed.is_err()); + } + } +} diff --git a/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/parser/datetime.rs b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/parser/datetime.rs new file mode 100644 index 000000000000..69c8d7f3f63a --- /dev/null +++ b/third_party/rust/chromium_crates_io/vendor/toml_edit-0.22.22/src/parser/datetime.rs @@ -0,0 +1,472 @@ +use std::ops::RangeInclusive; + +use crate::parser::error::CustomError; +use crate::parser::prelude::*; +use crate::parser::trivia::from_utf8_unchecked; + +use toml_datetime::{Date, Datetime, Offset, Time}; +use winnow::combinator::alt; +use winnow::combinator::cut_err; +use winnow::combinator::opt; +use winnow::combinator::preceded; +use winnow::combinator::trace; +use winnow::stream::Stream as _; +use winnow::token::one_of; +use winnow::token::take_while; + +// ;; Date and Time (as defined in RFC 3339) + +// date-time = offset-date-time / local-date-time / local-date / local-time +// offset-date-time = full-date time-delim full-time +// local-date-time = full-date time-delim partial-time +// local-date = full-date +// local-time = partial-time +// full-time = partial-time time-offset +pub(crate) fn date_time(input: &mut Input<'_>) -> PResult { + trace( + "date-time", + alt(( + (full_date, opt((time_delim, partial_time, opt(time_offset)))) + .map(|(date, opt)| { + match opt { + // Offset Date-Time + Some((_, time, offset)) => Datetime { + date: Some(date), + time: Some(time), + offset, + }, + // Local Date + None => Datetime { + date: Some(date), + time: None, + offset: None, + }, + } + }) + .context(StrContext::Label("date-time")), + partial_time + .map(|t| t.into()) + .context(StrContext::Label("time")), + )), + ) + .parse_next(input) +} + +// full-date = date-fullyear "-" date-month "-" date-mday +pub(crate) fn full_date(input: &mut Input<'_>) -> PResult { + trace("full-date", full_date_).parse_next(input) +} + +fn full_date_(input: &mut Input<'_>) -> PResult { + let year = date_fullyear.parse_next(input)?; + let _ = b'-'.parse_next(input)?; + let month = cut_err(date_month).parse_next(input)?; + let _ = cut_err(b'-').parse_next(input)?; + let day_start = input.checkpoint(); + let day = cut_err(date_mday).parse_next(input)?; + + let is_leap_year = (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)); + let max_days_in_month = match month { + 2 if is_leap_year => 29, + 2 => 28, + 4 | 6 | 9 | 11 => 30, + _ => 31, + }; + if max_days_in_month < day { + input.reset(&day_start); + return Err(winnow::error::ErrMode::from_external_error( + input, + winnow::error::ErrorKind::Verify, + CustomError::OutOfRange, + ) + .cut()); + } + + Ok(Date { year, month, day }) +} + +// partial-time = time-hour ":" time-minute ":" time-second [time-secfrac] +pub(crate) fn partial_time(input: &mut Input<'_>) -> PResult